From f39f27fdb761ccfa84919d937b1d419afe1c934e Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Thu, 8 Oct 2020 23:11:54 +0800 Subject: [PATCH 01/12] add tmpdir to tests in test_workflow.py and test_subtmitter.py --- pydra/engine/tests/test_submitter.py | 23 ++- pydra/engine/tests/test_workflow.py | 269 +++++++++++++++++---------- 2 files changed, 187 insertions(+), 105 deletions(-) diff --git a/pydra/engine/tests/test_submitter.py b/pydra/engine/tests/test_submitter.py index 3f450495a3..0d40b7f1b0 100644 --- a/pydra/engine/tests/test_submitter.py +++ b/pydra/engine/tests/test_submitter.py @@ -21,8 +21,10 @@ def sleep_add_one(x): return x + 1 -def test_callable_wf(plugin): +def test_callable_wf(plugin, tmpdir): wf = gen_basic_wf() + wf.cache_dir = tmpdir + with pytest.raises(NotImplementedError): wf() @@ -36,7 +38,7 @@ def test_callable_wf(plugin): assert res.output.out == 9 -def test_concurrent_wf(plugin): +def test_concurrent_wf(plugin, tmpdir): # concurrent workflow # A --> C # B --> D @@ -48,6 +50,8 @@ def test_concurrent_wf(plugin): wf.add(sleep_add_one(name="taskc", x=wf.taska.lzout.out)) wf.add(sleep_add_one(name="taskd", x=wf.taskb.lzout.out)) wf.set_output([("out1", wf.taskc.lzout.out), ("out2", wf.taskd.lzout.out)]) + wf.cache_dir = tmpdir + with Submitter(plugin) as sub: sub(wf) @@ -56,7 +60,7 @@ def test_concurrent_wf(plugin): assert res.output.out2 == 12 -def test_concurrent_wf_nprocs(): +def test_concurrent_wf_nprocs(tmpdir): # concurrent workflow # setting n_procs in Submitter that is passed to the worker # A --> C @@ -69,8 +73,8 @@ def test_concurrent_wf_nprocs(): wf.add(sleep_add_one(name="taskc", x=wf.taska.lzout.out)) wf.add(sleep_add_one(name="taskd", x=wf.taskb.lzout.out)) wf.set_output([("out1", wf.taskc.lzout.out), ("out2", wf.taskd.lzout.out)]) - # wf.plugin = 'cf' - # res = wf.run() + wf.cache_dir = tmpdir + with Submitter("cf", n_procs=2) as sub: sub(wf) @@ -79,7 +83,7 @@ def test_concurrent_wf_nprocs(): assert res.output.out2 == 12 -def 
test_wf_in_wf(plugin): +def test_wf_in_wf(plugin, tmpdir): """WF(A --> SUBWF(A --> B) --> B)""" wf = Workflow(name="wf_in_wf", input_spec=["x"]) wf.inputs.x = 3 @@ -96,6 +100,7 @@ def test_wf_in_wf(plugin): wf.add(sleep_add_one(name="wf_b", x=wf.sub_wf.lzout.out)) wf.set_output([("out", wf.wf_b.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin) as sub: sub(wf) @@ -105,7 +110,7 @@ def test_wf_in_wf(plugin): @pytest.mark.flaky(reruns=2) # when dask -def test_wf2(plugin_dask_opt): +def test_wf2(plugin_dask_opt, tmpdir): """ workflow as a node workflow-node with one task and no splitter """ @@ -117,6 +122,7 @@ def test_wf2(plugin_dask_opt): wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin_dask_opt) as sub: sub(wf) @@ -126,7 +132,7 @@ def test_wf2(plugin_dask_opt): @pytest.mark.flaky(reruns=2) # when dask -def test_wf_with_state(plugin_dask_opt): +def test_wf_with_state(plugin_dask_opt, tmpdir): wf = Workflow(name="wf_with_state", input_spec=["x"]) wf.add(sleep_add_one(name="taska", x=wf.lzin.x)) wf.add(sleep_add_one(name="taskb", x=wf.taska.lzout.out)) @@ -134,6 +140,7 @@ def test_wf_with_state(plugin_dask_opt): wf.inputs.x = [1, 2, 3] wf.split("x") wf.set_output([("out", wf.taskb.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin_dask_opt) as sub: sub(wf) diff --git a/pydra/engine/tests/test_workflow.py b/pydra/engine/tests/test_workflow.py index 085f656c33..50265d3eb9 100644 --- a/pydra/engine/tests/test_workflow.py +++ b/pydra/engine/tests/test_workflow.py @@ -48,7 +48,7 @@ def test_wf_name_conflict2(): assert "Another task named task_name is already added" in str(excinfo.value) -def test_wf_no_output(plugin): +def test_wf_no_output(plugin, tmpdir): """ Raise error when output isn't set with set_output""" wf = Workflow(name="wf_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -60,12 +60,13 @@ def test_wf_no_output(plugin): 
assert "Workflow output cannot be None" in str(excinfo.value) -def test_wf_1(plugin): +def test_wf_1(plugin, tmpdir): """ workflow with one task and no splitter""" wf = Workflow(name="wf_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -77,7 +78,7 @@ def test_wf_1(plugin): assert wf.output_dir.exists() -def test_wf_1a_outpastuple(plugin): +def test_wf_1a_outpastuple(plugin, tmpdir): """ workflow with one task and no splitter set_output takes a tuple """ @@ -86,6 +87,7 @@ def test_wf_1a_outpastuple(plugin): wf.set_output(("out", wf.add2.lzout.out)) wf.inputs.x = 2 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -95,12 +97,13 @@ def test_wf_1a_outpastuple(plugin): assert wf.output_dir.exists() -def test_wf_1_call_subm(plugin): +def test_wf_1_call_subm(plugin, tmpdir): """using wf.__call_ with submitter""" wf = Workflow(name="wf_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: wf(submitter=sub) @@ -110,13 +113,14 @@ def test_wf_1_call_subm(plugin): assert wf.output_dir.exists() -def test_wf_1_call_plug(plugin): +def test_wf_1_call_plug(plugin, tmpdir): """using wf.__call_ with plugin""" wf = Workflow(name="wf_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 wf.plugin = plugin + wf.cache_dir = tmpdir wf(plugin=plugin) @@ -125,13 +129,14 @@ def test_wf_1_call_plug(plugin): assert wf.output_dir.exists() -def test_wf_1_call_exception(plugin): +def test_wf_1_call_exception(plugin, tmpdir): """using wf.__call_ with plugin and submitter - should raise an exception""" wf = Workflow(name="wf_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) 
wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: with pytest.raises(Exception) as e: @@ -139,7 +144,7 @@ def test_wf_1_call_exception(plugin): assert "Specify submitter OR plugin" in str(e.value) -def test_wf_2(plugin): +def test_wf_2(plugin, tmpdir): """ workflow with 2 tasks, no splitter""" wf = Workflow(name="wf_2", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -157,7 +162,7 @@ def test_wf_2(plugin): assert 8 == results.output.out -def test_wf_2a(plugin): +def test_wf_2a(plugin, tmpdir): """ workflow with 2 tasks, no splitter creating add2_task first (before calling add method), """ @@ -170,6 +175,7 @@ def test_wf_2a(plugin): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -179,7 +185,7 @@ def test_wf_2a(plugin): assert wf.output_dir.exists() -def test_wf_2b(plugin): +def test_wf_2b(plugin, tmpdir): """ workflow with 2 tasks, no splitter creating add2_task first (before calling add method), adding inputs.x after add method @@ -193,6 +199,7 @@ def test_wf_2b(plugin): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -203,7 +210,7 @@ def test_wf_2b(plugin): assert wf.output_dir.exists() -def test_wf_2c_multoutp(plugin): +def test_wf_2c_multoutp(plugin, tmpdir): """ workflow with 2 tasks, no splitter setting multiple outputs for the workflow """ @@ -217,6 +224,7 @@ def test_wf_2c_multoutp(plugin): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -228,7 +236,7 @@ def test_wf_2c_multoutp(plugin): assert wf.output_dir.exists() -def test_wf_2d_outpasdict(plugin): +def test_wf_2d_outpasdict(plugin, tmpdir): """ workflow with 2 tasks, no splitter setting multiple outputs using a dictionary """ @@ -242,6 +250,7 @@ def 
test_wf_2d_outpasdict(plugin): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -262,6 +271,7 @@ def test_wf_3(plugin_dask_opt): wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 wf.inputs.y = None + wf.cache_dir = tmpdir with Submitter(plugin=plugin_dask_opt) as sub: sub(wf) @@ -272,7 +282,7 @@ def test_wf_3(plugin_dask_opt): @pytest.mark.xfail(reason="the task error doesn't propagate") -def test_wf_3a_exception(plugin): +def test_wf_3a_exception(plugin, tmpdir): """ testinh wf without set input, attr.NOTHING should be set and the function should raise an exception """ @@ -283,6 +293,7 @@ def test_wf_3a_exception(plugin): wf.inputs.x = 2 wf.inputs.y = attr.NOTHING wf.plugin = plugin + wf.cache_dir = tmpdir with pytest.raises(TypeError) as excinfo: with Submitter(plugin=plugin) as sub: @@ -290,7 +301,7 @@ def test_wf_3a_exception(plugin): assert "unsupported" in str(excinfo.value) -def test_wf_4(plugin): +def test_wf_4(plugin, tmpdir): """wf with a task that doesn't set one input and use the function default value""" wf = Workflow(name="wf_4", input_spec=["x", "y"]) wf.add(fun_addvar_default(name="addvar", a=wf.lzin.x)) @@ -298,6 +309,7 @@ def test_wf_4(plugin): wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -307,7 +319,7 @@ def test_wf_4(plugin): assert 5 == results.output.out -def test_wf_4a(plugin): +def test_wf_4a(plugin, tmpdir): """ wf with a task that doesn't set one input, the unset input is send to the task input, so the task should use the function default value @@ -318,6 +330,7 @@ def test_wf_4a(plugin): wf.set_output([("out", wf.add2.lzout.out)]) wf.inputs.x = 2 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -327,13 +340,14 @@ def test_wf_4a(plugin): assert 5 == results.output.out -def test_wf_5(plugin): +def 
test_wf_5(plugin, tmpdir): """ wf with two outputs connected to the task outputs one set_output """ wf = Workflow(name="wf_5", input_spec=["x", "y"], x=3, y=2) wf.add(fun_addsubvar(name="addsub", a=wf.lzin.x, b=wf.lzin.y)) wf.set_output([("out_sum", wf.addsub.lzout.sum), ("out_sub", wf.addsub.lzout.sub)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -343,7 +357,7 @@ def test_wf_5(plugin): assert 1 == results.output.out_sub -def test_wf_5a(plugin): +def test_wf_5a(plugin, tmpdir): """ wf with two outputs connected to the task outputs, set_output set twice """ @@ -351,6 +365,7 @@ def test_wf_5a(plugin): wf.add(fun_addsubvar(name="addsub", a=wf.lzin.x, b=wf.lzin.y)) wf.set_output([("out_sum", wf.addsub.lzout.sum)]) wf.set_output([("out_sub", wf.addsub.lzout.sub)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -365,13 +380,14 @@ def test_wf_5b_exception(): wf = Workflow(name="wf_5", input_spec=["x", "y"], x=3, y=2) wf.add(fun_addsubvar(name="addsub", a=wf.lzin.x, b=wf.lzin.y)) wf.set_output([("out", wf.addsub.lzout.sum)]) + wf.cache_dir = tmpdir with pytest.raises(Exception) as excinfo: wf.set_output([("out", wf.addsub.lzout.sub)]) assert "is already set" in str(excinfo.value) -def test_wf_6(plugin): +def test_wf_6(plugin, tmpdir): """ wf with two tasks and two outputs connected to both tasks, one set_output """ @@ -379,6 +395,7 @@ def test_wf_6(plugin): wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) wf.add(add2(name="add2", x=wf.mult.lzout.out)) wf.set_output([("out1", wf.mult.lzout.out), ("out2", wf.add2.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -389,7 +406,7 @@ def test_wf_6(plugin): assert 8 == results.output.out2 -def test_wf_6a(plugin): +def test_wf_6a(plugin, tmpdir): """ wf with two tasks and two outputs connected to both tasks, set_output used twice """ @@ -398,6 +415,7 @@ def test_wf_6a(plugin): wf.add(add2(name="add2", x=wf.mult.lzout.out)) 
wf.set_output([("out1", wf.mult.lzout.out)]) wf.set_output([("out2", wf.add2.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -408,7 +426,7 @@ def test_wf_6a(plugin): assert 8 == results.output.out2 -def test_wf_st_1(plugin): +def test_wf_st_1(plugin, tmpdir): """ Workflow with one task, a splitter for the workflow""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -417,6 +435,7 @@ def test_wf_st_1(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -433,7 +452,7 @@ def test_wf_st_1(plugin): assert odir.exists() -def test_wf_st_1_call_subm(plugin): +def test_wf_st_1_call_subm(plugin, tmpdir): """ Workflow with one task, a splitter for the workflow""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -442,6 +461,7 @@ def test_wf_st_1_call_subm(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: wf(submitter=sub) @@ -456,7 +476,7 @@ def test_wf_st_1_call_subm(plugin): assert odir.exists() -def test_wf_st_1_call_plug(plugin): +def test_wf_st_1_call_plug(plugin, tmpdir): """ Workflow with one task, a splitter for the workflow""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -465,6 +485,7 @@ def test_wf_st_1_call_plug(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir wf(plugin=plugin) @@ -478,7 +499,7 @@ def test_wf_st_1_call_plug(plugin): assert odir.exists() -def test_wf_st_noinput_1(plugin): +def test_wf_st_noinput_1(plugin, tmpdir): """ Workflow with one task, a splitter for the workflow""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -487,6 +508,7 @@ def 
test_wf_st_noinput_1(plugin): wf.inputs.x = [] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -499,13 +521,14 @@ def test_wf_st_noinput_1(plugin): assert wf.output_dir == [] -def test_wf_ndst_1(plugin): +def test_wf_ndst_1(plugin, tmpdir): """ workflow with one task, a splitter on the task level""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x).split("x")) wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -518,7 +541,7 @@ def test_wf_ndst_1(plugin): assert wf.output_dir.exists() -def test_wf_ndst_updatespl_1(plugin): +def test_wf_ndst_updatespl_1(plugin, tmpdir): """ workflow with one task, a splitter on the task level is added *after* calling add """ @@ -527,6 +550,7 @@ def test_wf_ndst_updatespl_1(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir wf.add2.split("x") with Submitter(plugin=plugin) as sub: @@ -540,7 +564,7 @@ def test_wf_ndst_updatespl_1(plugin): assert wf.output_dir.exists() -def test_wf_ndst_updatespl_1a(plugin): +def test_wf_ndst_updatespl_1a(plugin, tmpdir): """ workflow with one task (initialize before calling add), a splitter on the task level is added *after* calling add """ @@ -551,6 +575,7 @@ def test_wf_ndst_updatespl_1a(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -563,7 +588,7 @@ def test_wf_ndst_updatespl_1a(plugin): assert wf.output_dir.exists() -def test_wf_ndst_updateinp_1(plugin): +def test_wf_ndst_updateinp_1(plugin, tmpdir): """ workflow with one task, a splitter on the task level, updating input of the task after calling add @@ -576,6 +601,7 @@ def 
test_wf_ndst_updateinp_1(plugin): wf.plugin = plugin wf.add2.split("x") wf.add2.inputs.x = wf.lzin.y + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -587,13 +613,14 @@ def test_wf_ndst_updateinp_1(plugin): assert wf.output_dir.exists() -def test_wf_ndst_noinput_1(plugin): +def test_wf_ndst_noinput_1(plugin, tmpdir): """ workflow with one task, a splitter on the task level""" wf = Workflow(name="wf_spl_1", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x).split("x")) wf.inputs.x = [] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -606,7 +633,7 @@ def test_wf_ndst_noinput_1(plugin): assert wf.output_dir.exists() -def test_wf_st_2(plugin): +def test_wf_st_2(plugin, tmpdir): """ workflow with one task, splitters and combiner for workflow""" wf = Workflow(name="wf_st_2", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x)) @@ -615,6 +642,7 @@ def test_wf_st_2(plugin): wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -629,13 +657,14 @@ def test_wf_st_2(plugin): assert odir.exists() -def test_wf_ndst_2(plugin): +def test_wf_ndst_2(plugin, tmpdir): """ workflow with one task, splitters and combiner on the task level""" wf = Workflow(name="wf_ndst_2", input_spec=["x"]) wf.add(add2(name="add2", x=wf.lzin.x).split("x").combine(combiner="x")) wf.inputs.x = [1, 2] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -649,7 +678,7 @@ def test_wf_ndst_2(plugin): # workflows with structures A -> B -def test_wf_st_3(plugin): +def test_wf_st_3(plugin, tmpdir): """ workflow with 2 tasks, splitter on wf level""" wf = Workflow(name="wfst_3", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -659,6 +688,7 @@ def 
test_wf_st_3(plugin): wf.split(("x", "y")) wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -696,7 +726,7 @@ def test_wf_st_3(plugin): assert odir.exists() -def test_wf_ndst_3(plugin): +def test_wf_ndst_3(plugin, tmpdir): """Test workflow with 2 tasks, splitter on a task level""" wf = Workflow(name="wf_ndst_3", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split(("x", "y"))) @@ -705,6 +735,7 @@ def test_wf_ndst_3(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -716,7 +747,7 @@ def test_wf_ndst_3(plugin): assert wf.output_dir.exists() -def test_wf_st_4(plugin): +def test_wf_st_4(plugin, tmpdir): """ workflow with two tasks, scalar splitter and combiner for the workflow""" wf = Workflow(name="wf_st_4", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -726,6 +757,7 @@ def test_wf_st_4(plugin): wf.combine("x") wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -742,7 +774,7 @@ def test_wf_st_4(plugin): assert odir.exists() -def test_wf_ndst_4(plugin): +def test_wf_ndst_4(plugin, tmpdir): """ workflow with two tasks, scalar splitter and combiner on tasks level""" wf = Workflow(name="wf_ndst_4", input_spec=["a", "b"]) wf.add(multiply(name="mult", x=wf.lzin.a, y=wf.lzin.b).split(("x", "y"))) @@ -750,6 +782,7 @@ def test_wf_ndst_4(plugin): wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir wf.inputs.a = [1, 2] wf.inputs.b = [11, 12] @@ -765,7 +798,7 @@ def test_wf_ndst_4(plugin): assert wf.output_dir.exists() -def test_wf_st_5(plugin): +def test_wf_st_5(plugin, tmpdir): """ workflow with two tasks, outer splitter and no combiner""" wf = Workflow(name="wf_st_5", input_spec=["x", "y"]) 
wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -773,6 +806,7 @@ def test_wf_st_5(plugin): wf.split(["x", "y"], x=[1, 2], y=[11, 12]) wf.set_output([("out", wf.add2.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -788,7 +822,7 @@ def test_wf_st_5(plugin): assert odir.exists() -def test_wf_ndst_5(plugin): +def test_wf_ndst_5(plugin, tmpdir): """ workflow with two tasks, outer splitter on tasks level and no combiner""" wf = Workflow(name="wf_ndst_5", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split(["x", "y"])) @@ -796,6 +830,7 @@ def test_wf_ndst_5(plugin): wf.inputs.x = [1, 2] wf.inputs.y = [11, 12] wf.set_output([("out", wf.add2.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -809,7 +844,7 @@ def test_wf_ndst_5(plugin): assert wf.output_dir.exists() -def test_wf_st_6(plugin): +def test_wf_st_6(plugin, tmpdir): """ workflow with two tasks, outer splitter and combiner for the workflow""" wf = Workflow(name="wf_st_6", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -819,6 +854,7 @@ def test_wf_st_6(plugin): wf.combine("x") wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -836,7 +872,7 @@ def test_wf_st_6(plugin): assert odir.exists() -def test_wf_ndst_6(plugin): +def test_wf_ndst_6(plugin, tmpdir): """ workflow with two tasks, outer splitter and combiner on tasks level""" wf = Workflow(name="wf_ndst_6", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split(["x", "y"])) @@ -845,6 +881,7 @@ def test_wf_ndst_6(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -857,7 +894,7 @@ def test_wf_ndst_6(plugin): assert wf.output_dir.exists() -def test_wf_ndst_7(plugin): +def 
test_wf_ndst_7(plugin, tmpdir): """ workflow with two tasks, outer splitter and (full) combiner for first node only""" wf = Workflow(name="wf_ndst_6", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split("x").combine("x")) @@ -866,6 +903,7 @@ def test_wf_ndst_7(plugin): wf.inputs.y = 11 wf.set_output([("out", wf.iden.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -877,7 +915,7 @@ def test_wf_ndst_7(plugin): assert wf.output_dir.exists() -def test_wf_ndst_8(plugin): +def test_wf_ndst_8(plugin, tmpdir): """ workflow with two tasks, outer splitter and (partial) combiner for first task only""" wf = Workflow(name="wf_ndst_6", input_spec=["x", "y"]) wf.add( @@ -888,6 +926,7 @@ def test_wf_ndst_8(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.iden.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -900,7 +939,7 @@ def test_wf_ndst_8(plugin): assert wf.output_dir.exists() -def test_wf_ndst_9(plugin): +def test_wf_ndst_9(plugin, tmpdir): """ workflow with two tasks, outer splitter and (full) combiner for first task only""" wf = Workflow(name="wf_ndst_6", input_spec=["x", "y"]) wf.add( @@ -913,6 +952,7 @@ def test_wf_ndst_9(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.iden.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -927,7 +967,7 @@ def test_wf_ndst_9(plugin): # workflows with structures A -> B -> C -def test_wf_3sernd_ndst_1(plugin): +def test_wf_3sernd_ndst_1(plugin, tmpdir): """ workflow with three "serial" tasks, checking if the splitter is propagating""" wf = Workflow(name="wf_3sernd_ndst_1", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split(["x", "y"])) @@ -937,6 +977,7 @@ def test_wf_3sernd_ndst_1(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.add2_2nd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = 
tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -965,6 +1006,7 @@ def test_wf_3nd_st_1(plugin_dask_opt): wf.split(["x", "y"], x=[1, 2, 3], y=[11, 12]) wf.set_output([("out", wf.mult.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin_dask_opt) as sub: sub(wf) @@ -992,6 +1034,7 @@ def test_wf_3nd_ndst_1(plugin_dask_opt): wf.inputs.x = [1, 2, 3] wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin_dask_opt) as sub: sub(wf) @@ -1003,7 +1046,7 @@ def test_wf_3nd_ndst_1(plugin_dask_opt): assert wf.output_dir.exists() -def test_wf_3nd_st_2(plugin): +def test_wf_3nd_st_2(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and partial combiner on the workflow level """ @@ -1015,6 +1058,7 @@ def test_wf_3nd_st_2(plugin): wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1033,7 +1077,7 @@ def test_wf_3nd_st_2(plugin): assert odir.exists() -def test_wf_3nd_ndst_2(plugin): +def test_wf_3nd_ndst_2(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and partial combiner on the tasks levels """ @@ -1049,6 +1093,7 @@ def test_wf_3nd_ndst_2(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1061,7 +1106,7 @@ def test_wf_3nd_ndst_2(plugin): assert wf.output_dir.exists() -def test_wf_3nd_st_3(plugin): +def test_wf_3nd_st_3(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and partial combiner (from the second task) on the workflow level """ @@ -1072,6 +1117,7 @@ def test_wf_3nd_st_3(plugin): wf.split(["x", "y"], x=[1, 2, 3], y=[11, 12]).combine("y") wf.set_output([("out", wf.mult.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as 
sub: sub(wf) @@ -1090,7 +1136,7 @@ def test_wf_3nd_st_3(plugin): assert odir.exists() -def test_wf_3nd_ndst_3(plugin): +def test_wf_3nd_ndst_3(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and partial combiner (from the second task) on the tasks levels """ @@ -1105,6 +1151,7 @@ def test_wf_3nd_ndst_3(plugin): wf.inputs.x = [1, 2, 3] wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1118,7 +1165,7 @@ def test_wf_3nd_ndst_3(plugin): assert wf.output_dir.exists() -def test_wf_3nd_st_4(plugin): +def test_wf_3nd_st_4(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and full combiner on the workflow level """ @@ -1129,6 +1176,7 @@ def test_wf_3nd_st_4(plugin): wf.split(["x", "y"], x=[1, 2, 3], y=[11, 12]).combine(["x", "y"]) wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1147,7 +1195,7 @@ def test_wf_3nd_st_4(plugin): assert odir.exists() -def test_wf_3nd_ndst_4(plugin): +def test_wf_3nd_ndst_4(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter and full combiner on the tasks levels """ @@ -1163,6 +1211,7 @@ def test_wf_3nd_ndst_4(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1175,7 +1224,7 @@ def test_wf_3nd_ndst_4(plugin): assert wf.output_dir.exists() -def test_wf_3nd_st_5(plugin): +def test_wf_3nd_st_5(plugin, tmpdir): """ workflow with three tasks (A->C, B->C) and three fields in the splitter, splitter and partial combiner (from the second task) on the workflow level """ @@ -1191,6 +1240,7 @@ def test_wf_3nd_st_5(plugin): wf.set_output([("out", wf.addvar.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with 
Submitter(plugin=plugin) as sub: sub(wf) @@ -1212,7 +1262,7 @@ def test_wf_3nd_st_5(plugin): assert odir.exists() -def test_wf_3nd_ndst_5(plugin): +def test_wf_3nd_ndst_5(plugin, tmpdir): """ workflow with three tasks (A->C, B->C) and three fields in the splitter, all tasks have splitters and the last one has a partial combiner (from the 2nd) """ @@ -1232,6 +1282,7 @@ def test_wf_3nd_ndst_5(plugin): wf.set_output([("out", wf.addvar.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1247,7 +1298,7 @@ def test_wf_3nd_ndst_5(plugin): assert wf.output_dir.exists() -def test_wf_3nd_ndst_6(plugin): +def test_wf_3nd_ndst_6(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, the third one uses scalar splitter from the previous ones and a combiner """ @@ -1263,6 +1314,7 @@ def test_wf_3nd_ndst_6(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1276,7 +1328,7 @@ def test_wf_3nd_ndst_6(plugin): # workflows with Left and Right part in splitters A -> B (L&R parts of the splitter) -def test_wf_ndstLR_1(plugin): +def test_wf_ndstLR_1(plugin, tmpdir): """ Test workflow with 2 tasks, splitters on tasks levels The second task has its own simple splitter and the Left part from the first task should be added @@ -1288,6 +1340,7 @@ def test_wf_ndstLR_1(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1304,7 +1357,7 @@ def test_wf_ndstLR_1(plugin): assert wf.output_dir.exists() -def test_wf_ndstLR_1a(plugin): +def test_wf_ndstLR_1a(plugin, tmpdir): """ Test workflow with 2 tasks, splitters on tasks levels The second task has splitter that has Left part (from previous state) and the Right part (it's onw splitter) @@ -1318,6 +1371,7 @@ def 
test_wf_ndstLR_1a(plugin): wf.inputs.y = [11, 12] wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1334,7 +1388,7 @@ def test_wf_ndstLR_1a(plugin): assert wf.output_dir.exists() -def test_wf_ndstLR_2(plugin): +def test_wf_ndstLR_2(plugin, tmpdir): """ Test workflow with 2 tasks, splitters on tasks levels The second task has its own outer splitter and the Left part from the first task should be added @@ -1351,6 +1405,7 @@ def test_wf_ndstLR_2(plugin): wf.inputs.z = [100, 200] wf.set_output([("out", wf.addvar.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1383,7 +1438,7 @@ def test_wf_ndstLR_2(plugin): assert wf.output_dir.exists() -def test_wf_ndstLR_2a(plugin): +def test_wf_ndstLR_2a(plugin, tmpdir): """ Test workflow with 2 tasks, splitters on tasks levels The second task has splitter that has Left part (from previous state) and the Right part (it's onw outer splitter) @@ -1400,6 +1455,7 @@ def test_wf_ndstLR_2a(plugin): wf.inputs.z = [100, 200] wf.set_output([("out", wf.addvar.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1435,7 +1491,7 @@ def test_wf_ndstLR_2a(plugin): # workflows with inner splitters A -> B (inner spl) -def test_wf_ndstinner_1(plugin): +def test_wf_ndstinner_1(plugin, tmpdir): """ workflow with 2 tasks, the second task has inner splitter """ @@ -1445,6 +1501,7 @@ def test_wf_ndstinner_1(plugin): wf.inputs.x = 1 wf.set_output([("out_list", wf.list.lzout.out), ("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1459,7 +1516,7 @@ def test_wf_ndstinner_1(plugin): assert wf.output_dir.exists() -def test_wf_ndstinner_2(plugin): +def test_wf_ndstinner_2(plugin, tmpdir): """ workflow with 2 tasks, the second task has two inputs and inner splitter from one of the input """ @@ 
-1470,6 +1527,7 @@ def test_wf_ndstinner_2(plugin): wf.inputs.y = 10 wf.set_output([("out_list", wf.list.lzout.out), ("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1484,7 +1542,7 @@ def test_wf_ndstinner_2(plugin): assert wf.output_dir.exists() -def test_wf_ndstinner_3(plugin): +def test_wf_ndstinner_3(plugin, tmpdir): """ workflow with 2 tasks, the second task has two inputs and outer splitter that includes an inner field """ @@ -1495,6 +1553,7 @@ def test_wf_ndstinner_3(plugin): wf.inputs.y = [10, 100] wf.set_output([("out_list", wf.list.lzout.out), ("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1509,7 +1568,7 @@ def test_wf_ndstinner_3(plugin): assert wf.output_dir.exists() -def test_wf_ndstinner_4(plugin): +def test_wf_ndstinner_4(plugin, tmpdir): """ workflow with 3 tasks, the second task has two inputs and inner splitter from one of the input, the third task has no its own splitter @@ -1522,6 +1581,7 @@ def test_wf_ndstinner_4(plugin): wf.inputs.y = 10 wf.set_output([("out_list", wf.list.lzout.out), ("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1541,7 +1601,7 @@ def test_wf_ndstinner_4(plugin): # workflow that have some single values as the input -def test_wf_st_singl_1(plugin): +def test_wf_st_singl_1(plugin, tmpdir): """ workflow with two tasks, only one input is in the splitter and combiner""" wf = Workflow(name="wf_st_5", input_spec=["x", "y"]) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) @@ -1551,6 +1611,7 @@ def test_wf_st_singl_1(plugin): wf.combine("x") wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1564,7 +1625,7 @@ def test_wf_st_singl_1(plugin): assert odir.exists() -def test_wf_ndst_singl_1(plugin): +def 
test_wf_ndst_singl_1(plugin, tmpdir): """ workflow with two tasks, outer splitter and combiner on tasks level; only one input is part of the splitter, the other is a single value """ @@ -1575,6 +1636,7 @@ def test_wf_ndst_singl_1(plugin): wf.inputs.y = 11 wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1585,7 +1647,7 @@ def test_wf_ndst_singl_1(plugin): assert wf.output_dir.exists() -def test_wf_st_singl_2(plugin): +def test_wf_st_singl_2(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter on the workflow level only one input is part of the splitter, the other is a single value @@ -1598,6 +1660,7 @@ def test_wf_st_singl_2(plugin): wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1613,7 +1676,7 @@ def test_wf_st_singl_2(plugin): assert odir.exists() -def test_wf_ndst_singl_2(plugin): +def test_wf_ndst_singl_2(plugin, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter on the tasks levels only one input is part of the splitter, the other is a single value @@ -1626,6 +1689,7 @@ def test_wf_ndst_singl_2(plugin): wf.inputs.y = 11 wf.set_output([("out", wf.mult.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1640,7 +1704,7 @@ def test_wf_ndst_singl_2(plugin): # workflows with structures wf(A) -def test_wfasnd_1(plugin): +def test_wfasnd_1(plugin, tmpdir): """ workflow as a node workflow-node with one task and no splitter """ @@ -1653,6 +1717,7 @@ def test_wfasnd_1(plugin): wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1663,7 +1728,7 @@ def test_wfasnd_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_wfinp_1(plugin): +def 
test_wfasnd_wfinp_1(plugin, tmpdir): """ workflow as a node workflow-node with one task and no splitter input set for the main workflow @@ -1677,6 +1742,7 @@ def test_wfasnd_wfinp_1(plugin): wf.inputs.x = 2 wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -1689,7 +1755,7 @@ def test_wfasnd_wfinp_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_wfndupdate(plugin): +def test_wfasnd_wfndupdate(plugin, tmpdir): """ workflow as a node workflow-node with one task and no splitter wfasnode input is updated to use the main workflow input @@ -1704,6 +1770,7 @@ def test_wfasnd_wfndupdate(plugin): wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1713,7 +1780,7 @@ def test_wfasnd_wfndupdate(plugin): assert wf.output_dir.exists() -def test_wfasnd_wfndupdate_rerun(plugin): +def test_wfasnd_wfndupdate_rerun(plugin, tmpdir): """ workflow as a node workflow-node with one task and no splitter wfasnode is run first and later is @@ -1734,6 +1801,7 @@ def test_wfasnd_wfndupdate_rerun(plugin): wf.wfnd.inputs.x = wf.lzin.x wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1757,7 +1825,7 @@ def test_wfasnd_wfndupdate_rerun(plugin): assert wf_o.output_dir.exists() -def test_wfasnd_st_1(plugin): +def test_wfasnd_st_1(plugin, tmpdir): """ workflow as a node workflow-node with one task, splitter for wfnd @@ -1772,6 +1840,7 @@ def test_wfasnd_st_1(plugin): wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir checksum_before = wf.checksum with Submitter(plugin=plugin) as sub: @@ -1784,7 +1853,7 @@ def test_wfasnd_st_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_st_updatespl_1(plugin): +def test_wfasnd_st_updatespl_1(plugin, tmpdir): """ 
workflow as a node workflow-node with one task, splitter for wfnd is set after add @@ -1799,6 +1868,7 @@ def test_wfasnd_st_updatespl_1(plugin): wfnd.split("x") wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1809,7 +1879,7 @@ def test_wfasnd_st_updatespl_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_ndst_1(plugin): +def test_wfasnd_ndst_1(plugin, tmpdir): """ workflow as a node workflow-node with one task, splitter for node @@ -1825,6 +1895,7 @@ def test_wfasnd_ndst_1(plugin): wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1835,7 +1906,7 @@ def test_wfasnd_ndst_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_ndst_updatespl_1(plugin): +def test_wfasnd_ndst_updatespl_1(plugin, tmpdir): """ workflow as a node workflow-node with one task, splitter for node added after add @@ -1852,6 +1923,7 @@ def test_wfasnd_ndst_updatespl_1(plugin): wfnd.add2.split("x") wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1862,7 +1934,7 @@ def test_wfasnd_ndst_updatespl_1(plugin): assert wf.output_dir.exists() -def test_wfasnd_wfst_1(plugin): +def test_wfasnd_wfst_1(plugin, tmpdir): """ workflow as a node workflow-node with one task, splitter for the main workflow @@ -1893,7 +1965,7 @@ def test_wfasnd_wfst_1(plugin): # workflows with structures wf(A) -> B -def test_wfasnd_st_2(plugin): +def test_wfasnd_st_2(plugin, tmpdir): """ workflow as a node, the main workflow has two tasks, splitter for wfnd @@ -1910,6 +1982,7 @@ def test_wfasnd_st_2(plugin): wf.add(add2(name="add2", x=wf.wfnd.lzout.out)) wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1920,7 +1993,7 @@ def test_wfasnd_st_2(plugin): assert 
wf.output_dir.exists() -def test_wfasnd_wfst_2(plugin): +def test_wfasnd_wfst_2(plugin, tmpdir): """ workflow as a node, the main workflow has two tasks, splitter for the main workflow @@ -1953,7 +2026,7 @@ def test_wfasnd_wfst_2(plugin): # workflows with structures A -> wf(B) -def test_wfasnd_ndst_3(plugin): +def test_wfasnd_ndst_3(plugin, tmpdir): """ workflow as the second node, the main workflow has two tasks, splitter for the first task @@ -1970,6 +2043,7 @@ def test_wfasnd_ndst_3(plugin): wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -1980,7 +2054,7 @@ def test_wfasnd_ndst_3(plugin): assert wf.output_dir.exists() -def test_wfasnd_wfst_3(plugin): +def test_wfasnd_wfst_3(plugin, tmpdir): """ workflow as the second node, the main workflow has two tasks, splitter for the main workflow @@ -1998,6 +2072,7 @@ def test_wfasnd_wfst_3(plugin): wf.set_output([("out", wf.wfnd.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3497,7 +3572,7 @@ def test_workflow_combine2(tmpdir): # testing lzout.all to collect all of the results and let FunctionTask deal with it -def test_wf_lzoutall_1(plugin): +def test_wf_lzoutall_1(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to add2_sub2_res function by using lzout.all syntax @@ -3518,7 +3593,7 @@ def test_wf_lzoutall_1(plugin): assert 8 == results.output.out -def test_wf_lzoutall_1a(plugin): +def test_wf_lzoutall_1a(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to add2_res function by using lzout.all syntax in the node connections and for wf output @@ -3539,7 +3614,7 @@ def test_wf_lzoutall_1a(plugin): assert results.output.out_all == {"out_add": 8, "out_sub": 4} -def test_wf_lzoutall_st_1(plugin): +def test_wf_lzoutall_st_1(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to 
add2_res function by using lzout.all syntax @@ -3560,7 +3635,7 @@ def test_wf_lzoutall_st_1(plugin): assert results.output.out_add == [8, 62, 62, 602] -def test_wf_lzoutall_st_1a(plugin): +def test_wf_lzoutall_st_1a(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to add2_res function by using lzout.all syntax @@ -3586,7 +3661,7 @@ def test_wf_lzoutall_st_1a(plugin): ] -def test_wf_lzoutall_st_2(plugin): +def test_wf_lzoutall_st_2(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to add2_res function by using lzout.all syntax @@ -3610,7 +3685,7 @@ def test_wf_lzoutall_st_2(plugin): assert results.output.out_add[1] == [62, 602] -def test_wf_lzoutall_st_2a(plugin): +def test_wf_lzoutall_st_2a(plugin, tmpdir): """ workflow with 2 tasks, no splitter passing entire result object to add2_res function by using lzout.all syntax @@ -3639,7 +3714,7 @@ def test_wf_lzoutall_st_2a(plugin): # worfklows that have files in the result, the files should be copied to the wf dir -def test_wf_resultfile_1(plugin): +def test_wf_resultfile_1(plugin, tmpdir): """ workflow with a file in the result, file should be copied to the wf dir""" wf = Workflow(name="wf_file_1", input_spec=["x"]) wf.add(fun_write_file(name="writefile", filename=wf.lzin.x)) @@ -3656,7 +3731,7 @@ def test_wf_resultfile_1(plugin): assert results.output.wf_out == wf.output_dir / "file_1.txt" -def test_wf_resultfile_2(plugin): +def test_wf_resultfile_2(plugin, tmpdir): """ workflow with a list of files in the wf result, all files should be copied to the wf dir """ @@ -3677,7 +3752,7 @@ def test_wf_resultfile_2(plugin): assert file == wf.output_dir / file_list[ii] -def test_wf_resultfile_3(plugin): +def test_wf_resultfile_3(plugin, tmpdir): """ workflow with a dictionaries of files in the wf result, all files should be copied to the wf dir """ @@ -3702,7 +3777,7 @@ def test_wf_resultfile_3(plugin): assert val == wf.output_dir / file_list[ii] -def 
test_wf_upstream_error1(plugin): +def test_wf_upstream_error1(plugin, tmpdir): """ workflow with two tasks, task2 dependent on an task1 which raised an error""" wf = Workflow(name="wf", input_spec=["x"]) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) @@ -3718,7 +3793,7 @@ def test_wf_upstream_error1(plugin): assert "raised an error" in str(excinfo.value) -def test_wf_upstream_error2(plugin): +def test_wf_upstream_error2(plugin, tmpdir): """ task2 dependent on task1, task1 errors, workflow-level split on task 1 goal - workflow finish running, one output errors but the other doesn't """ @@ -3737,7 +3812,7 @@ def test_wf_upstream_error2(plugin): assert "raised an error" in str(excinfo.value) -def test_wf_upstream_error3(plugin): +def test_wf_upstream_error3(plugin, tmpdir): """ task2 dependent on task1, task1 errors, task-level split on task 1 goal - workflow finish running, one output errors but the other doesn't """ @@ -3756,7 +3831,7 @@ def test_wf_upstream_error3(plugin): assert "raised an error" in str(excinfo.value) -def test_wf_upstream_error4(plugin): +def test_wf_upstream_error4(plugin, tmpdir): """ workflow with one task, which raises an error""" wf = Workflow(name="wf", input_spec=["x"]) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) @@ -3771,7 +3846,7 @@ def test_wf_upstream_error4(plugin): assert "addvar1" in str(excinfo.value) -def test_wf_upstream_error5(plugin): +def test_wf_upstream_error5(plugin, tmpdir): """ nested workflow with one task, which raises an error""" wf_main = Workflow(name="wf_main", input_spec=["x"]) wf = Workflow(name="wf", input_spec=["x"], x=wf_main.lzin.x) @@ -3791,7 +3866,7 @@ def test_wf_upstream_error5(plugin): assert "raised an error" in str(excinfo.value) -def test_wf_upstream_error6(plugin): +def test_wf_upstream_error6(plugin, tmpdir): """ nested workflow with two tasks, the first one raises an error""" wf_main = Workflow(name="wf_main", input_spec=["x"]) wf = Workflow(name="wf", input_spec=["x"], 
x=wf_main.lzin.x) @@ -3812,7 +3887,7 @@ def test_wf_upstream_error6(plugin): assert "raised an error" in str(excinfo.value) -def test_wf_upstream_error7(plugin): +def test_wf_upstream_error7(plugin, tmpdir): """ workflow with three sequential tasks, the first task raises an error the last task is set as the workflow output @@ -3834,7 +3909,7 @@ def test_wf_upstream_error7(plugin): assert wf.addvar2._errored == wf.addvar3._errored == ["addvar1"] -def test_wf_upstream_error7a(plugin): +def test_wf_upstream_error7a(plugin, tmpdir): """ workflow with three sequential tasks, the first task raises an error the second task is set as the workflow output @@ -3856,7 +3931,7 @@ def test_wf_upstream_error7a(plugin): assert wf.addvar2._errored == wf.addvar3._errored == ["addvar1"] -def test_wf_upstream_error7b(plugin): +def test_wf_upstream_error7b(plugin, tmpdir): """ workflow with three sequential tasks, the first task raises an error the second and the third tasks are set as the workflow output @@ -3878,7 +3953,7 @@ def test_wf_upstream_error7b(plugin): assert wf.addvar2._errored == wf.addvar3._errored == ["addvar1"] -def test_wf_upstream_error8(plugin): +def test_wf_upstream_error8(plugin, tmpdir): """ workflow with three tasks, the first one raises an error, so 2 others are removed""" wf = Workflow(name="wf", input_spec=["x"]) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) @@ -3898,7 +3973,7 @@ def test_wf_upstream_error8(plugin): assert wf.addvar2._errored == wf.addtwo._errored == ["addvar1"] -def test_wf_upstream_error9(plugin): +def test_wf_upstream_error9(plugin, tmpdir): """ workflow with five tasks with two "branches", one branch has an error, the second is fine @@ -3924,7 +3999,7 @@ def test_wf_upstream_error9(plugin): assert wf.follow_err._errored == ["err"] -def test_wf_upstream_error9a(plugin): +def test_wf_upstream_error9a(plugin, tmpdir): """ workflow with five tasks with two "branches", one branch has an error, the second is fine @@ -3948,7 +4023,7 @@ 
def test_wf_upstream_error9a(plugin): assert wf.follow_err._errored == ["err"] -def test_wf_upstream_error9b(plugin): +def test_wf_upstream_error9b(plugin, tmpdir): """ workflow with five tasks with two "branches", one branch has an error, the second is fine From 153140698fa94553cd88eb98f9a41dda00dc43da Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Thu, 8 Oct 2020 23:22:18 +0800 Subject: [PATCH 02/12] fix workflow cache_dir in test_submitter.py --- pydra/engine/tests/test_submitter.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pydra/engine/tests/test_submitter.py b/pydra/engine/tests/test_submitter.py index 0d40b7f1b0..e420294d3e 100644 --- a/pydra/engine/tests/test_submitter.py +++ b/pydra/engine/tests/test_submitter.py @@ -23,7 +23,6 @@ def sleep_add_one(x): def test_callable_wf(plugin, tmpdir): wf = gen_basic_wf() - wf.cache_dir = tmpdir with pytest.raises(NotImplementedError): wf() @@ -33,6 +32,8 @@ def test_callable_wf(plugin, tmpdir): del wf, res wf = gen_basic_wf() + wf.cache_dir = tmpdir + sub = Submitter(plugin) res = wf(submitter=sub) assert res.output.out == 9 @@ -96,8 +97,9 @@ def test_wf_in_wf(plugin, tmpdir): subwf.set_output([("out", subwf.sub_b.lzout.out)]) # connect, then add subwf.inputs.x = wf.wf_a.lzout.out - wf.add(subwf) + subwf.cache_dir = tmpdir + wf.add(subwf) wf.add(sleep_add_one(name="wf_b", x=wf.sub_wf.lzout.out)) wf.set_output([("out", wf.wf_b.lzout.out)]) wf.cache_dir = tmpdir @@ -118,6 +120,7 @@ def test_wf2(plugin_dask_opt, tmpdir): wfnd.add(sleep_add_one(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = 2 + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) From 83cf8eaeb5febe5a13128ac9efe7310987428d10 Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Thu, 8 Oct 2020 23:37:41 +0800 Subject: [PATCH 03/12] fix workflow cache_dir in test_workflow.py --- pydra/engine/tests/test_workflow.py | 23 +++++++++++++++++++---- 1 file changed, 19 
insertions(+), 4 deletions(-) diff --git a/pydra/engine/tests/test_workflow.py b/pydra/engine/tests/test_workflow.py index 50265d3eb9..c34e004d2d 100644 --- a/pydra/engine/tests/test_workflow.py +++ b/pydra/engine/tests/test_workflow.py @@ -153,6 +153,7 @@ def test_wf_2(plugin, tmpdir): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -263,7 +264,7 @@ def test_wf_2d_outpasdict(plugin, tmpdir): @pytest.mark.flaky(reruns=3) # when dask -def test_wf_3(plugin_dask_opt): +def test_wf_3(plugin_dask_opt, tmpdir): """ testing None value for an input""" wf = Workflow(name="wf_3", input_spec=["x", "y"]) wf.add(fun_addvar_none(name="addvar", a=wf.lzin.x, b=wf.lzin.y)) @@ -375,7 +376,7 @@ def test_wf_5a(plugin, tmpdir): assert 1 == results.output.out_sub -def test_wf_5b_exception(): +def test_wf_5b_exception(tmpdir): """ set_output used twice with the same name - exception should be raised """ wf = Workflow(name="wf_5", input_spec=["x", "y"], x=3, y=2) wf.add(fun_addsubvar(name="addsub", a=wf.lzin.x, b=wf.lzin.y)) @@ -995,7 +996,7 @@ def test_wf_3sernd_ndst_1(plugin, tmpdir): @pytest.mark.flaky(reruns=3) # when dask -def test_wf_3nd_st_1(plugin_dask_opt): +def test_wf_3nd_st_1(plugin_dask_opt, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter on the workflow level """ @@ -1023,7 +1024,7 @@ def test_wf_3nd_st_1(plugin_dask_opt): @pytest.mark.flaky(reruns=3) # when dask -def test_wf_3nd_ndst_1(plugin_dask_opt): +def test_wf_3nd_ndst_1(plugin_dask_opt, tmpdir): """ workflow with three tasks, third one connected to two previous tasks, splitter on the tasks levels """ @@ -1712,6 +1713,7 @@ def test_wfasnd_1(plugin, tmpdir): wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = 2 + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1737,6 +1739,7 @@ def test_wfasnd_wfinp_1(plugin, 
tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + wfnd.cache_dir = tmpdir wf.add(wfnd) wf.inputs.x = 2 @@ -1764,6 +1767,7 @@ def test_wfasnd_wfndupdate(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=2) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"], x=3) wfnd.inputs.x = wf.lzin.x @@ -1790,6 +1794,7 @@ def test_wfasnd_wfndupdate_rerun(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=2) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + wfnd.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wfnd) @@ -1835,6 +1840,7 @@ def test_wfasnd_st_1(plugin, tmpdir): wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.split("x") wfnd.inputs.x = [2, 4] + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1862,6 +1868,7 @@ def test_wfasnd_st_updatespl_1(plugin, tmpdir): wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = [2, 4] + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1890,6 +1897,7 @@ def test_wfasnd_ndst_1(plugin, tmpdir): # TODO: without this the test is failing wfnd.plugin = plugin wfnd.inputs.x = [2, 4] + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1917,6 +1925,7 @@ def test_wfasnd_ndst_updatespl_1(plugin, tmpdir): # TODO: without this the test is failing wfnd.plugin = plugin wfnd.inputs.x = [2, 4] + wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1943,6 +1952,7 @@ def test_wfasnd_wfst_1(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + 
wfnd.cache_dir = tmpdir wf.add(wfnd) wf.split("x") @@ -1976,6 +1986,7 @@ def test_wfasnd_st_2(plugin, tmpdir): wfnd.split(("x", "y")) wfnd.inputs.x = [2, 4] wfnd.inputs.y = [1, 10] + wfnd.cache_dir = tmpdir wf = Workflow(name="wf_st_3", input_spec=["x", "y"]) wf.add(wfnd) @@ -2002,6 +2013,7 @@ def test_wfasnd_wfst_2(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x", "y"], x=wf.lzin.x, y=wf.lzin.y) wfnd.add(multiply(name="mult", x=wfnd.lzin.x, y=wfnd.lzin.y)) wfnd.set_output([("out", wfnd.mult.lzout.out)]) + wfnd.cache_dir = tmpdir wf.add(wfnd) wf.add(add2(name="add2", x=wf.wfnd.lzout.out)) @@ -2010,6 +2022,7 @@ def test_wfasnd_wfst_2(plugin, tmpdir): wf.inputs.y = [1, 10] wf.set_output([("out", wf.add2.lzout.out)]) wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -2039,6 +2052,7 @@ def test_wfasnd_ndst_3(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + wfnd.cache_dir = tmpdir wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) @@ -2068,6 +2082,7 @@ def test_wfasnd_wfst_3(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) + wfnd.cache_dir = tmpdir wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) From 4a608b1010ae9f62e2997006ac259492032b1dd1 Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Fri, 9 Oct 2020 00:02:10 +0800 Subject: [PATCH 04/12] add tmpdir to tests in test_node_task.py --- pydra/engine/tests/test_node_task.py | 74 +++++++++++++++++++++------- 1 file changed, 55 insertions(+), 19 deletions(-) diff --git a/pydra/engine/tests/test_node_task.py b/pydra/engine/tests/test_node_task.py index 518cf95a02..34ec3c11ca 100644 --- a/pydra/engine/tests/test_node_task.py +++ b/pydra/engine/tests/test_node_task.py @@ -365,9 +365,10 @@ def test_odir_init():
@pytest.mark.flaky(reruns=2) # when dask -def test_task_nostate_1(plugin_dask_opt): +def test_task_nostate_1(plugin_dask_opt, tmpdir): """ task without splitter""" nn = fun_addtwo(name="NA", a=3) + nn.cache_dir = tmpdir assert np.allclose(nn.inputs.a, [3]) assert nn.state is None @@ -405,9 +406,10 @@ def test_task_nostate_1_call(): @pytest.mark.flaky(reruns=2) # when dask -def test_task_nostate_1_call_subm(plugin_dask_opt): +def test_task_nostate_1_call_subm(plugin_dask_opt, tmpdir): """ task without splitter""" nn = fun_addtwo(name="NA", a=3) + nn.cache_dir = tmpdir assert np.allclose(nn.inputs.a, [3]) assert nn.state is None @@ -422,9 +424,10 @@ def test_task_nostate_1_call_subm(plugin_dask_opt): @pytest.mark.flaky(reruns=2) # when dask -def test_task_nostate_1_call_plug(plugin_dask_opt): +def test_task_nostate_1_call_plug(plugin_dask_opt, tmpdir): """ task without splitter""" nn = fun_addtwo(name="NA", a=3) + nn.cache_dir = tmpdir assert np.allclose(nn.inputs.a, [3]) assert nn.state is None @@ -450,9 +453,10 @@ def test_task_nostate_1_call_updateinp(): assert nn.output_dir.exists() -def test_task_nostate_2(plugin): +def test_task_nostate_2(plugin, tmpdir): """ task with a list as an input, but no splitter""" nn = moment(name="NA", n=3, lst=[2, 3, 4]) + nn.cache_dir = tmpdir assert np.allclose(nn.inputs.n, [3]) assert np.allclose(nn.inputs.lst, [2, 3, 4]) assert nn.state is None @@ -467,9 +471,10 @@ def test_task_nostate_2(plugin): assert nn.output_dir.exists() -def test_task_nostate_3(plugin): +def test_task_nostate_3(plugin, tmpdir): """ task with a dictionary as an input""" nn = fun_dict(name="NA", d={"a": "ala", "b": "bala"}) + nn.cache_dir = tmpdir assert nn.inputs.d == {"a": "ala", "b": "bala"} with Submitter(plugin=plugin) as sub: @@ -489,6 +494,7 @@ def test_task_nostate_4(plugin, tmpdir): f.write("hello from pydra\n") nn = fun_file(name="NA", filename=file1) + nn.cache_dir = tmpdir with Submitter(plugin) as sub: sub(nn) @@ -719,13 +725,14 @@ def 
test_task_nostate_cachelocations_updated(plugin, tmpdir): @pytest.mark.flaky(reruns=2) # when dask @pytest.mark.parametrize("input_type", ["list", "array"]) -def test_task_state_1(plugin_dask_opt, input_type): +def test_task_state_1(plugin_dask_opt, input_type, tmpdir): """ task with the simplest splitter""" a_in = [3, 5] if input_type == "array": a_in = np.array(a_in) nn = fun_addtwo(name="NA").split(splitter="a", a=a_in) + nn.cache_dir = tmpdir assert nn.state.splitter == "NA.a" assert nn.state.splitter_rpn == ["NA.a"] @@ -761,11 +768,12 @@ def test_task_state_1(plugin_dask_opt, input_type): assert odir.exists() -def test_task_state_1a(plugin): +def test_task_state_1a(plugin, tmpdir): """ task with the simplest splitter (inputs set separately)""" nn = fun_addtwo(name="NA") nn.split(splitter="a") nn.inputs.a = [3, 5] + nn.cache_dir = tmpdir assert nn.state.splitter == "NA.a" assert nn.state.splitter_rpn == ["NA.a"] @@ -781,11 +789,12 @@ def test_task_state_1a(plugin): assert results[i].output.out == res[1] -def test_task_state_singl_1(plugin): +def test_task_state_singl_1(plugin, tmpdir): """ Tasks with two inputs and a splitter (no combiner) one input is a single value, the other is in the splitter and combiner """ nn = fun_addvar(name="NA").split(splitter="a", a=[3, 5], b=10) + nn.cache_dir = tmpdir assert nn.inputs.a == [3, 5] assert nn.inputs.b == 10 @@ -839,7 +848,14 @@ def test_task_state_singl_1(plugin): ) @pytest.mark.parametrize("input_type", ["list", "array", "mixed"]) def test_task_state_2( - plugin, splitter, state_splitter, state_rpn, expected, expected_ind, input_type + plugin, + splitter, + state_splitter, + state_rpn, + expected, + expected_ind, + input_type, + tmpdir, ): """ Tasks with two inputs and a splitter (no combiner)""" a_in, b_in = [3, 5], [10, 20] @@ -848,6 +864,8 @@ def test_task_state_2( elif input_type == "mixed": a_in = np.array(a_in) nn = fun_addvar(name="NA").split(splitter=splitter, a=a_in, b=b_in) + nn.cache_dir = tmpdir + assert 
(nn.inputs.a == np.array([3, 5])).all() assert (nn.inputs.b == np.array([10, 20])).all() assert nn.state.splitter == state_splitter @@ -883,9 +901,10 @@ def test_task_state_2( assert odir.exists() -def test_task_state_3(plugin): +def test_task_state_3(plugin, tmpdir): """ task with the simplest splitter, the input is an empty list""" nn = fun_addtwo(name="NA").split(splitter="a", a=[]) + nn.cache_dir = tmpdir assert nn.state.splitter == "NA.a" assert nn.state.splitter_rpn == ["NA.a"] @@ -904,12 +923,14 @@ def test_task_state_3(plugin): @pytest.mark.parametrize("input_type", ["list", "array"]) -def test_task_state_4(plugin, input_type): +def test_task_state_4(plugin, input_type, tmpdir): """ task with a list as an input, and a simple splitter """ lst_in = [[2, 3, 4], [1, 2, 3]] if input_type == "array": lst_in = np.array(lst_in) nn = moment(name="NA", n=3, lst=lst_in).split(splitter="lst") + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, 3) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == "NA.lst" @@ -935,9 +956,11 @@ def test_task_state_4(plugin, input_type): assert odir.exists() -def test_task_state_4a(plugin): +def test_task_state_4a(plugin, tmpdir): """ task with a tuple as an input, and a simple splitter """ nn = moment(name="NA", n=3, lst=[(2, 3, 4), (1, 2, 3)]).split(splitter="lst") + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, 3) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == "NA.lst" @@ -955,11 +978,13 @@ def test_task_state_4a(plugin): assert odir.exists() -def test_task_state_5(plugin): +def test_task_state_5(plugin, tmpdir): """ task with a list as an input, and the variable is part of the scalar splitter""" nn = moment(name="NA", n=[1, 3], lst=[[2, 3, 4], [1, 2, 3]]).split( splitter=("n", "lst") ) + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, [1, 3]) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == ("NA.n", 
"NA.lst") @@ -977,13 +1002,15 @@ def test_task_state_5(plugin): assert odir.exists() -def test_task_state_5_exception(plugin): +def test_task_state_5_exception(plugin, tmpdir): """ task with a list as an input, and the variable is part of the scalar splitter the shapes are not matching, so exception should be raised """ nn = moment(name="NA", n=[1, 3, 3], lst=[[2, 3, 4], [1, 2, 3]]).split( splitter=("n", "lst") ) + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, [1, 3, 3]) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == ("NA.n", "NA.lst") @@ -994,11 +1021,13 @@ def test_task_state_5_exception(plugin): assert "shape" in str(excinfo.value) -def test_task_state_6(plugin): +def test_task_state_6(plugin, tmpdir): """ ask with a list as an input, and the variable is part of the outer splitter """ nn = moment(name="NA", n=[1, 3], lst=[[2, 3, 4], [1, 2, 3]]).split( splitter=["n", "lst"] ) + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, [1, 3]) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == ["NA.n", "NA.lst"] @@ -1016,11 +1045,13 @@ def test_task_state_6(plugin): assert odir.exists() -def test_task_state_6a(plugin): +def test_task_state_6a(plugin, tmpdir): """ ask with a tuple as an input, and the variable is part of the outer splitter """ nn = moment(name="NA", n=[1, 3], lst=[(2, 3, 4), (1, 2, 3)]).split( splitter=["n", "lst"] ) + nn.cache_dir = tmpdir + assert np.allclose(nn.inputs.n, [1, 3]) assert np.allclose(nn.inputs.lst, [[2, 3, 4], [1, 2, 3]]) assert nn.state.splitter == ["NA.n", "NA.lst"] @@ -1039,9 +1070,10 @@ def test_task_state_6a(plugin): @pytest.mark.flaky(reruns=2) # when dask -def test_task_state_comb_1(plugin_dask_opt): +def test_task_state_comb_1(plugin_dask_opt, tmpdir): """ task with the simplest splitter and combiner""" nn = fun_addtwo(name="NA").split(a=[3, 5], splitter="a").combine(combiner="a") + nn.cache_dir = tmpdir assert (nn.inputs.a == np.array([3, 
5])).all() @@ -1173,6 +1205,7 @@ def test_task_state_comb_2( state_rpn_final, expected, expected_val, + tmpdir, ): """ Tasks with scalar and outer splitters and partial or full combiners""" nn = ( @@ -1180,6 +1213,7 @@ def test_task_state_comb_2( .split(a=[3, 5], b=[10, 20], splitter=splitter) .combine(combiner=combiner) ) + nn.cache_dir = tmpdir assert (nn.inputs.a == np.array([3, 5])).all() @@ -1219,11 +1253,12 @@ def test_task_state_comb_2( assert odir.exists() -def test_task_state_comb_singl_1(plugin): +def test_task_state_comb_singl_1(plugin, tmpdir): """ Tasks with two inputs; one input is a single value, the other is in the splitter and combiner """ nn = fun_addvar(name="NA").split(splitter="a", a=[3, 5], b=10).combine(combiner="a") + nn.cache_dir = tmpdir assert nn.inputs.a == [3, 5] assert nn.inputs.b == 10 @@ -1248,9 +1283,10 @@ def test_task_state_comb_singl_1(plugin): assert odir.exists() -def test_task_state_comb_3(plugin): +def test_task_state_comb_3(plugin, tmpdir): """ task with the simplest splitter, the input is an empty list""" nn = fun_addtwo(name="NA").split(splitter="a", a=[]).combine(combiner=["a"]) + nn.cache_dir = tmpdir assert nn.state.splitter == "NA.a" assert nn.state.splitter_rpn == ["NA.a"] From ec3e92b1ec34c5b8d4de26002802b5d69cfc4d74 Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Fri, 9 Oct 2020 00:12:27 +0800 Subject: [PATCH 05/12] add tmpdir to tests in test_shelltask.py --- pydra/engine/tests/test_shelltask.py | 142 +++++++++++++-------------- pydra/engine/tests/utils.py | 5 +- 2 files changed, 74 insertions(+), 73 deletions(-) diff --git a/pydra/engine/tests/test_shelltask.py b/pydra/engine/tests/test_shelltask.py index 740e22d78c..38b1e19dc2 100644 --- a/pydra/engine/tests/test_shelltask.py +++ b/pydra/engine/tests/test_shelltask.py @@ -17,20 +17,20 @@ @pytest.mark.flaky(reruns=2) # when dask @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_1(plugin_dask_opt, 
results_function): +def test_shell_cmd_1(plugin_dask_opt, results_function, tmpdir): """ simple command, no arguments """ cmd = ["pwd"] shelly = ShellCommandTask(name="shelly", executable=cmd) assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin=plugin_dask_opt) + res = results_function(shelly, plugin=plugin_dask_opt, tmpdir=tmpdir) assert Path(res.output.stdout.rstrip()) == shelly.output_dir assert res.output.return_code == 0 assert res.output.stderr == "" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_1_strip(plugin, results_function): +def test_shell_cmd_1_strip(plugin, results_function, tmpdir): """ simple command, no arguments strip option to remove \n at the end os stdout """ @@ -38,27 +38,27 @@ def test_shell_cmd_1_strip(plugin, results_function): shelly = ShellCommandTask(name="shelly", executable=cmd, strip=True) assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert Path(res.output.stdout) == Path(shelly.output_dir) assert res.output.return_code == 0 assert res.output.stderr == "" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_2(plugin, results_function): +def test_shell_cmd_2(plugin, results_function, tmpdir): """ a command with arguments, cmd and args given as executable """ cmd = ["echo", "hail", "pydra"] shelly = ShellCommandTask(name="shelly", executable=cmd) assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout.strip() == " ".join(cmd[1:]) assert res.output.return_code == 0 assert res.output.stderr == "" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_2a(plugin, results_function): +def test_shell_cmd_2a(plugin, results_function, tmpdir): """ a command with arguments, using 
executable and args """ cmd_exec = "echo" cmd_args = ["hail", "pydra"] @@ -67,14 +67,14 @@ def test_shell_cmd_2a(plugin, results_function): assert shelly.inputs.executable == "echo" assert shelly.cmdline == "echo " + " ".join(cmd_args) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout.strip() == " ".join(cmd_args) assert res.output.return_code == 0 assert res.output.stderr == "" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_2b(plugin, results_function): +def test_shell_cmd_2b(plugin, results_function, tmpdir): """ a command with arguments, using strings executable and args """ cmd_exec = "echo" cmd_args = "pydra" @@ -83,7 +83,7 @@ def test_shell_cmd_2b(plugin, results_function): assert shelly.inputs.executable == "echo" assert shelly.cmdline == "echo pydra" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "pydra\n" assert res.output.return_code == 0 assert res.output.stderr == "" @@ -250,7 +250,7 @@ def test_wf_shell_cmd_1(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_1(plugin, results_function, use_validator): +def test_shell_cmd_inputspec_1(plugin, results_function, use_validator, tmpdir): """ a command with executable, args and one command opt, using a customized input_spec to add the opt to the command in the right place that is specified in metadata["cmd_pos"] @@ -284,12 +284,12 @@ def test_shell_cmd_inputspec_1(plugin, results_function, use_validator): assert shelly.inputs.args == cmd_args assert shelly.cmdline == "echo -n hello from pydra" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "hello from pydra" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def 
test_shell_cmd_inputspec_2(plugin, results_function, use_validator): +def test_shell_cmd_inputspec_2(plugin, results_function, use_validator, tmpdir): """ a command with executable, args and two command options, using a customized input_spec to add the opt to the command in the right place that is specified in metadata["cmd_pos"] @@ -331,12 +331,12 @@ def test_shell_cmd_inputspec_2(plugin, results_function, use_validator): assert shelly.inputs.executable == cmd_exec assert shelly.inputs.args == cmd_args assert shelly.cmdline == "echo -n HELLO from pydra" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "HELLO from pydra" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_3(plugin, results_function): +def test_shell_cmd_inputspec_3(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided """ cmd_exec = "echo" hello = "HELLO" @@ -365,12 +365,12 @@ def test_shell_cmd_inputspec_3(plugin, results_function): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "HELLO\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_3a(plugin, results_function): +def test_shell_cmd_inputspec_3a(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided using shorter syntax for input spec (no attr.ib) """ @@ -394,12 +394,12 @@ def test_shell_cmd_inputspec_3a(plugin, results_function): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "HELLO\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def 
test_shell_cmd_inputspec_3b(plugin, results_function): +def test_shell_cmd_inputspec_3b(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided after init""" cmd_exec = "echo" hello = "HELLO" @@ -429,7 +429,7 @@ def test_shell_cmd_inputspec_3b(plugin, results_function): shelly.inputs.text = hello assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "HELLO\n" @@ -464,7 +464,7 @@ def test_shell_cmd_inputspec_3c_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_3c(plugin, results_function): +def test_shell_cmd_inputspec_3c(plugin, results_function, tmpdir): """ mandatory=False, so tasks runs fine even without the value """ cmd_exec = "echo" my_input_spec = SpecInfo( @@ -493,12 +493,12 @@ def test_shell_cmd_inputspec_3c(plugin, results_function): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_4(plugin, results_function): +def test_shell_cmd_inputspec_4(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided """ cmd_exec = "echo" my_input_spec = SpecInfo( @@ -524,12 +524,12 @@ def test_shell_cmd_inputspec_4(plugin, results_function): assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hello" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "Hello\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_4a(plugin, results_function): +def 
test_shell_cmd_inputspec_4a(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided using shorter syntax for input spec (no attr.ib) """ @@ -550,12 +550,12 @@ def test_shell_cmd_inputspec_4a(plugin, results_function): assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hello" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "Hello\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_4b(plugin, results_function): +def test_shell_cmd_inputspec_4b(plugin, results_function, tmpdir): """ mandatory field added to fields, value provided """ cmd_exec = "echo" my_input_spec = SpecInfo( @@ -581,7 +581,7 @@ def test_shell_cmd_inputspec_4b(plugin, results_function): assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hi" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "Hi\n" @@ -654,7 +654,7 @@ def test_shell_cmd_inputspec_4d_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_5_nosubm(plugin, results_function): +def test_shell_cmd_inputspec_5_nosubm(plugin, results_function, tmpdir): """ checking xor in metadata: task should work fine, since only one option is True""" cmd_exec = "ls" cmd_t = True @@ -695,7 +695,7 @@ def test_shell_cmd_inputspec_5_nosubm(plugin, results_function): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -t" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) def test_shell_cmd_inputspec_5a_exception(plugin): @@ -747,7 +747,7 @@ def test_shell_cmd_inputspec_5a_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_6(plugin, results_function): +def 
test_shell_cmd_inputspec_6(plugin, results_function, tmpdir): """ checking requires in metadata: the required field is set in the init, so the task works fine """ @@ -790,7 +790,7 @@ def test_shell_cmd_inputspec_6(plugin, results_function): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -l -t" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) def test_shell_cmd_inputspec_6a_exception(plugin): @@ -834,7 +834,7 @@ def test_shell_cmd_inputspec_6a_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_6b(plugin, results_function): +def test_shell_cmd_inputspec_6b(plugin, results_function, tmpdir): """ checking requires in metadata: the required field set after the init """ @@ -878,11 +878,11 @@ def test_shell_cmd_inputspec_6b(plugin, results_function): shelly.inputs.opt_l = cmd_l assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -l -t" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_7(plugin, results_function): +def test_shell_cmd_inputspec_7(plugin, results_function, tmpdir): """ providing output name using input_spec, using name_tamplate in metadata @@ -911,7 +911,7 @@ def test_shell_cmd_inputspec_7(plugin, results_function): name="shelly", executable=cmd, args=args, input_spec=my_input_spec ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1.exists() # checking if the file is created in a good place @@ -920,7 +920,7 @@ def test_shell_cmd_inputspec_7(plugin, results_function): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_7a(plugin, results_function): +def 
test_shell_cmd_inputspec_7a(plugin, results_function, tmpdir): """ providing output name using input_spec, using name_tamplate in metadata @@ -951,7 +951,7 @@ def test_shell_cmd_inputspec_7a(plugin, results_function): name="shelly", executable=cmd, args=args, input_spec=my_input_spec ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1_changed.exists() # checking if the file is created in a good place @@ -960,7 +960,7 @@ def test_shell_cmd_inputspec_7a(plugin, results_function): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_7b(plugin, results_function): +def test_shell_cmd_inputspec_7b(plugin, results_function, tmpdir): """ providing new file and output name using input_spec, using name_template in metadata @@ -998,7 +998,7 @@ def test_shell_cmd_inputspec_7b(plugin, results_function): input_spec=my_input_spec, ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1054,7 +1054,7 @@ def test_shell_cmd_inputspec_8(plugin, results_function, tmpdir): input_spec=my_input_spec, ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1110,7 +1110,7 @@ def test_shell_cmd_inputspec_8a(plugin, results_function, tmpdir): input_spec=my_input_spec, ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1154,7 +1154,7 @@ def test_shell_cmd_inputspec_9(tmpdir, plugin, results_function): name="shelly", executable=cmd, input_spec=my_input_spec, file_orig=file ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.file_copy.exists() 
assert res.output.file_copy.name == "file_copy.txt" @@ -1202,7 +1202,7 @@ def test_shell_cmd_inputspec_9a(tmpdir, plugin, results_function): name="shelly", executable=cmd, input_spec=my_input_spec, file_orig=file ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.file_copy.exists() assert res.output.file_copy.name == "file_copy" @@ -1249,7 +1249,7 @@ def test_shell_cmd_inputspec_9b(tmpdir, plugin, results_function): name="shelly", executable=cmd, input_spec=my_input_spec, file_orig=file ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.file_copy.exists() assert res.output.file_copy.name == "file" @@ -1295,7 +1295,7 @@ def test_shell_cmd_inputspec_10(plugin, results_function, tmpdir): ) assert shelly.inputs.executable == cmd_exec - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "hello from boston" @@ -1345,7 +1345,7 @@ def test_shell_cmd_inputspec_copyfile_1(plugin, results_function, tmpdir): name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=str(file) ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is copied, and than it is changed in place @@ -1403,7 +1403,7 @@ def test_shell_cmd_inputspec_copyfile_1a(plugin, results_function, tmpdir): name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=str(file) ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is uses a soft link, but it creates and an extra copy before modifying @@ -1475,7 +1475,7 @@ def test_shell_cmd_inputspec_copyfile_1b(plugin, results_function, tmpdir): name="shelly", executable=cmd, 
input_spec=my_input_spec, orig_file=str(file) ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is not copied, it is changed in place @@ -1485,7 +1485,7 @@ def test_shell_cmd_inputspec_copyfile_1b(plugin, results_function, tmpdir): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_state_1(plugin, results_function): +def test_shell_cmd_inputspec_state_1(plugin, results_function, tmpdir): """ adding state to the input from input_spec """ cmd_exec = "echo" hello = ["HELLO", "hi"] @@ -1515,7 +1515,7 @@ def test_shell_cmd_inputspec_state_1(plugin, results_function): assert shelly.inputs.executable == cmd_exec # todo: this doesn't work when state # assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res[0].output.stdout == "HELLO\n" assert res[1].output.stdout == "hi\n" @@ -1565,7 +1565,7 @@ def test_shell_cmd_inputspec_typeval_2(use_validator): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_state_1a(plugin, results_function): +def test_shell_cmd_inputspec_state_1a(plugin, results_function, tmpdir): """ adding state to the input from input_spec using shorter syntax for input_spec (without default) """ @@ -1589,13 +1589,13 @@ def test_shell_cmd_inputspec_state_1a(plugin, results_function): ).split("text") assert shelly.inputs.executable == cmd_exec - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res[0].output.stdout == "HELLO\n" assert res[1].output.stdout == "hi\n" @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_inputspec_state_2(plugin, results_function): +def test_shell_cmd_inputspec_state_2(plugin, results_function, tmpdir): """ 
adding splitter to input tha is used in the output_file_tamplate """ @@ -1623,7 +1623,7 @@ def test_shell_cmd_inputspec_state_2(plugin, results_function): name="shelly", executable=cmd, args=args, input_spec=my_input_spec ).split("args") - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) for i in range(len(args)): assert res[i].output.stdout == "" assert res[i].output.out1.exists() @@ -1670,7 +1670,7 @@ def test_shell_cmd_inputspec_state_3(plugin, results_function, tmpdir): assert shelly.inputs.executable == cmd_exec # todo: this doesn't work when state # assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res[0].output.stdout == "hello from pydra" assert res[1].output.stdout == "have a nice one" @@ -1725,7 +1725,7 @@ def test_shell_cmd_inputspec_copyfile_state_1(plugin, results_function, tmpdir): ).split("orig_file") txt_l = ["from pydra", "world"] - res_l = results_function(shelly, plugin) + res_l = results_function(shelly, plugin, tmpdir) for i, res in enumerate(res_l): assert res.output.stdout == "" assert res.output.out_file.exists() @@ -2218,7 +2218,7 @@ def test_wf_shell_cmd_ndst_1(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_1(plugin, results_function): +def test_shell_cmd_outputspec_1(plugin, results_function, tmpdir): """ customised output_spec, adding files to the output, providing specific pathname """ @@ -2230,13 +2230,13 @@ def test_shell_cmd_outputspec_1(plugin, results_function): ) shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.newfile.exists() @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_1a(plugin, 
results_function): +def test_shell_cmd_outputspec_1a(plugin, results_function, tmpdir): """ customised output_spec, adding files to the output, providing specific pathname """ @@ -2248,7 +2248,7 @@ def test_shell_cmd_outputspec_1a(plugin, results_function): ) shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.newfile.exists() @@ -2272,7 +2272,7 @@ def test_shell_cmd_outputspec_1b_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_2(plugin, results_function): +def test_shell_cmd_outputspec_2(plugin, results_function, tmpdir): """ customised output_spec, adding files to the output, using a wildcard in default @@ -2285,7 +2285,7 @@ def test_shell_cmd_outputspec_2(plugin, results_function): ) shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.newfile.exists() @@ -2310,7 +2310,7 @@ def test_shell_cmd_outputspec_2a_exception(plugin): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_3(plugin, results_function): +def test_shell_cmd_outputspec_3(plugin, results_function, tmpdir): """ customised output_spec, adding files to the output, using a wildcard in default, should collect two files @@ -2323,7 +2323,7 @@ def test_shell_cmd_outputspec_3(plugin, results_function): ) shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" # newfile is a list assert len(res.output.newfile) == 2 @@ -2331,7 +2331,7 @@ def 
test_shell_cmd_outputspec_3(plugin, results_function): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_4(plugin, results_function): +def test_shell_cmd_outputspec_4(plugin, results_function, tmpdir): """ customised output_spec, adding files to the output, using a function to collect output, the function is saved in the field metadata @@ -2349,7 +2349,7 @@ def gather_output(keyname, output_dir): ) shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" # newfile is a list assert len(res.output.newfile) == 2 @@ -2357,7 +2357,7 @@ def gather_output(keyname, output_dir): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_outputspec_5(plugin, results_function): +def test_shell_cmd_outputspec_5(plugin, results_function, tmpdir): """ providing output name by providing output_file_template (similar to the previous example, but not touching input_spec) @@ -2386,7 +2386,7 @@ def test_shell_cmd_outputspec_5(plugin, results_function): name="shelly", executable=cmd, args=args, output_spec=my_output_spec ) - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.out1.exists() @@ -2421,7 +2421,7 @@ def test_shell_cmd_outputspec_5a(): @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_shell_cmd_state_outputspec_1(plugin, results_function): +def test_shell_cmd_state_outputspec_1(plugin, results_function, tmpdir): """ providing output name by providing output_file_template splitter for a field that is used in the template @@ -2450,7 +2450,7 @@ def test_shell_cmd_state_outputspec_1(plugin, results_function): name="shelly", executable=cmd, args=args, output_spec=my_output_spec 
).split("args") - res = results_function(shelly, plugin) + res = results_function(shelly, plugin, tmpdir) for i in range(len(args)): assert res[i].output.stdout == "" assert res[i].output.out1.exists() diff --git a/pydra/engine/tests/utils.py b/pydra/engine/tests/utils.py index b2fbdab762..edc2533c28 100644 --- a/pydra/engine/tests/utils.py +++ b/pydra/engine/tests/utils.py @@ -23,15 +23,16 @@ ) -def result_no_submitter(shell_task, plugin=None): +def result_no_submitter(shell_task, plugin=None, tmpdir=None): """ helper function to return result when running without submitter """ return shell_task() -def result_submitter(shell_task, plugin): +def result_submitter(shell_task, plugin, tmpdir): """ helper function to return result when running with submitter with specific plugin """ + shell_task.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: shell_task(submitter=sub) return shell_task.result() From d10e9c83ef4f788e0892d2590371c3a406b8437e Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Tue, 13 Oct 2020 15:11:16 +0800 Subject: [PATCH 06/12] add tmpdir to test_boutiques.py --- pydra/engine/tests/test_boutiques.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydra/engine/tests/test_boutiques.py b/pydra/engine/tests/test_boutiques.py index b2ea022b77..5399b2e88a 100644 --- a/pydra/engine/tests/test_boutiques.py +++ b/pydra/engine/tests/test_boutiques.py @@ -26,12 +26,12 @@ "maskfile", ["test_brain.nii.gz", "test_brain", "test_brain.nii"] ) @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) -def test_boutiques_1(maskfile, plugin, results_function): +def test_boutiques_1(maskfile, plugin, results_function, tmpdir): """ simple task to run fsl.bet using BoshTask""" btask = BoshTask(name="NA", zenodo_id="1482743") btask.inputs.infile = Infile btask.inputs.maskfile = maskfile - res = results_function(btask, plugin) + res = results_function(btask, plugin, tmpdir) assert res.output.return_code == 0 From 
6a8874f34145f320ef0cb52cdd5236e6bf5c3ce0 Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Tue, 13 Oct 2020 04:24:00 -0400 Subject: [PATCH 07/12] add tmpdir to tests in test_numpy_examples.py --- pydra/engine/tests/test_numpy_examples.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pydra/engine/tests/test_numpy_examples.py b/pydra/engine/tests/test_numpy_examples.py index 35b8972319..572d8707a2 100644 --- a/pydra/engine/tests/test_numpy_examples.py +++ b/pydra/engine/tests/test_numpy_examples.py @@ -17,12 +17,13 @@ def arrayout(val): return np.array([val, val]) -def test_multiout(plugin): +def test_multiout(plugin, tmpdir): """ testing a simple function that returns a numpy array""" wf = Workflow("wf", input_spec=["val"], val=2) wf.add(arrayout(name="mo", val=wf.lzin.val)) wf.set_output([("array", wf.mo.lzout.b)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin, n_procs=2) as sub: sub(runnable=wf) @@ -33,13 +34,14 @@ def test_multiout(plugin): assert np.array_equal(results[1].output.array, np.array([2, 2])) -def test_multiout_st(plugin): +def test_multiout_st(plugin, tmpdir): """ testing a simple function that returns a numpy array, adding splitter""" wf = Workflow("wf", input_spec=["val"], val=[0, 1, 2]) wf.add(arrayout(name="mo", val=wf.lzin.val)) wf.mo.split("val").combine("val") wf.set_output([("array", wf.mo.lzout.b)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin, n_procs=2) as sub: sub(runnable=wf) From 28d174297508a96dde07c6a43c11f0caab0c0d6b Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Tue, 13 Oct 2020 16:53:05 +0800 Subject: [PATCH 08/12] add tmpdir to tests in test_shelltask.py --- pydra/engine/tests/test_shelltask.py | 81 ++++++++++++++++++++-------- 1 file changed, 59 insertions(+), 22 deletions(-) diff --git a/pydra/engine/tests/test_shelltask.py b/pydra/engine/tests/test_shelltask.py index 38b1e19dc2..bf93cbd085 100644 --- a/pydra/engine/tests/test_shelltask.py +++ b/pydra/engine/tests/test_shelltask.py @@ -93,7 
+93,7 @@ def test_shell_cmd_2b(plugin, results_function, tmpdir): @pytest.mark.flaky(reruns=2) -def test_shell_cmd_3(plugin_dask_opt): +def test_shell_cmd_3(plugin_dask_opt, tmpdir): """ commands without arguments splitter = executable """ @@ -101,6 +101,8 @@ def test_shell_cmd_3(plugin_dask_opt): # all args given as executable shelly = ShellCommandTask(name="shelly", executable=cmd).split("executable") + shelly.cache_dir = tmpdir + assert shelly.cmdline == ["pwd", "whoami"] res = shelly(plugin=plugin_dask_opt) assert Path(res[0].output.stdout.rstrip()) == shelly.output_dir[0] @@ -113,7 +115,7 @@ def test_shell_cmd_3(plugin_dask_opt): assert res[0].output.stderr == res[1].output.stderr == "" -def test_shell_cmd_4(plugin): +def test_shell_cmd_4(plugin, tmpdir): """ a command with arguments, using executable and args splitter=args """ @@ -123,6 +125,8 @@ def test_shell_cmd_4(plugin): shelly = ShellCommandTask(name="shelly", executable=cmd_exec, args=cmd_args).split( splitter="args" ) + shelly.cache_dir = tmpdir + assert shelly.inputs.executable == "echo" assert shelly.inputs.args == ["nipype", "pydra"] assert shelly.cmdline == ["echo nipype", "echo pydra"] @@ -135,7 +139,7 @@ def test_shell_cmd_4(plugin): assert res[0].output.stderr == res[1].output.stderr == "" -def test_shell_cmd_5(plugin): +def test_shell_cmd_5(plugin, tmpdir): """ a command with arguments using splitter and combiner for args """ @@ -147,6 +151,8 @@ def test_shell_cmd_5(plugin): .split(splitter="args") .combine("args") ) + shelly.cache_dir = tmpdir + assert shelly.inputs.executable == "echo" assert shelly.inputs.args == ["nipype", "pydra"] assert shelly.cmdline == ["echo nipype", "echo pydra"] @@ -156,7 +162,7 @@ def test_shell_cmd_5(plugin): assert res[1].output.stdout == "pydra\n" -def test_shell_cmd_6(plugin): +def test_shell_cmd_6(plugin, tmpdir): """ a command with arguments, outer splitter for executable and args """ @@ -166,6 +172,8 @@ def test_shell_cmd_6(plugin): shelly = 
ShellCommandTask(name="shelly", executable=cmd_exec, args=cmd_args).split( splitter=["executable", "args"] ) + shelly.cache_dir = tmpdir + assert shelly.inputs.executable == ["echo", ["echo", "-n"]] assert shelly.inputs.args == ["nipype", "pydra"] assert shelly.cmdline == [ @@ -197,7 +205,7 @@ def test_shell_cmd_6(plugin): ) -def test_shell_cmd_7(plugin): +def test_shell_cmd_7(plugin, tmpdir): """ a command with arguments, outer splitter for executable and args, and combiner=args """ @@ -209,6 +217,8 @@ def test_shell_cmd_7(plugin): .split(splitter=["executable", "args"]) .combine("args") ) + shelly.cache_dir = tmpdir + assert shelly.inputs.executable == ["echo", ["echo", "-n"]] assert shelly.inputs.args == ["nipype", "pydra"] @@ -224,7 +234,7 @@ def test_shell_cmd_7(plugin): # tests with workflows -def test_wf_shell_cmd_1(plugin): +def test_wf_shell_cmd_1(plugin, tmpdir): """ a workflow with two connected commands""" wf = Workflow(name="wf", input_spec=["cmd1", "cmd2"]) wf.inputs.cmd1 = "pwd" @@ -237,6 +247,7 @@ def test_wf_shell_cmd_1(plugin): ) wf.set_output([("out", wf.shelly_ls.lzout.stdout)]) + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: wf(submitter=sub) @@ -427,13 +438,14 @@ def test_shell_cmd_inputspec_3b(plugin, results_function, tmpdir): name="shelly", executable=cmd_exec, input_spec=my_input_spec ) shelly.inputs.text = hello + assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "HELLO\n" -def test_shell_cmd_inputspec_3c_exception(plugin): +def test_shell_cmd_inputspec_3c_exception(plugin, tmpdir): """ mandatory field added to fields, value is not provided, so exception is raised """ cmd_exec = "echo" my_input_spec = SpecInfo( @@ -458,6 +470,8 @@ def test_shell_cmd_inputspec_3c_exception(plugin): shelly = ShellCommandTask( name="shelly", executable=cmd_exec, input_spec=my_input_spec ) + shelly.cache_dir = tmpdir + with 
pytest.raises(Exception) as excinfo: shelly() assert "mandatory" in str(excinfo.value) @@ -491,6 +505,8 @@ def test_shell_cmd_inputspec_3c(plugin, results_function, tmpdir): shelly = ShellCommandTask( name="shelly", executable=cmd_exec, input_spec=my_input_spec ) + shelly.cache_dir = tmpdir + assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo" res = results_function(shelly, plugin, tmpdir) @@ -520,6 +536,7 @@ def test_shell_cmd_inputspec_4(plugin, results_function, tmpdir): shelly = ShellCommandTask( name="shelly", executable=cmd_exec, input_spec=my_input_spec ) + shelly.cache_dir = tmpdir assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hello" @@ -1742,7 +1759,7 @@ def test_shell_cmd_inputspec_copyfile_state_1(plugin, results_function, tmpdir): @pytest.mark.flaky(reruns=2) # when dask -def test_wf_shell_cmd_2(plugin_dask_opt): +def test_wf_shell_cmd_2(plugin_dask_opt, tmpdir): """ a workflow with input with defined output_file_template (str) that requires wf.lzin """ @@ -1750,6 +1767,7 @@ def test_wf_shell_cmd_2(plugin_dask_opt): wf.inputs.cmd = "touch" wf.inputs.args = "newfile.txt" + wf.cache_dir = tmpdir my_input_spec = SpecInfo( name="Input", @@ -1788,7 +1806,7 @@ def test_wf_shell_cmd_2(plugin_dask_opt): assert res.output.out_f.parent == wf.output_dir -def test_wf_shell_cmd_2a(plugin): +def test_wf_shell_cmd_2a(plugin, tmpdir): """ a workflow with input with defined output_file_template (tuple) that requires wf.lzin """ @@ -1796,6 +1814,7 @@ def test_wf_shell_cmd_2a(plugin): wf.inputs.cmd = "touch" wf.inputs.args = "newfile.txt" + wf.cache_dir = tmpdir my_input_spec = SpecInfo( name="Input", @@ -1833,7 +1852,7 @@ def test_wf_shell_cmd_2a(plugin): assert res.output.out_f.exists() -def test_wf_shell_cmd_3(plugin): +def test_wf_shell_cmd_3(plugin, tmpdir): """ a workflow with 2 tasks, first one has input with output_file_template (str, uses wf.lzin), that is passed to the second task @@ -1843,6 +1862,7 @@ def 
test_wf_shell_cmd_3(plugin): wf.inputs.cmd1 = "touch" wf.inputs.cmd2 = "cp" wf.inputs.args = "newfile.txt" + wf.cache_dir = tmpdir my_input_spec1 = SpecInfo( name="Input", @@ -1929,7 +1949,7 @@ def test_wf_shell_cmd_3(plugin): assert res.output.cp_file.parent == wf.output_dir -def test_wf_shell_cmd_3a(plugin): +def test_wf_shell_cmd_3a(plugin, tmpdir): """ a workflow with 2 tasks, first one has input with output_file_template (str, uses wf.lzin), that is passed to the second task @@ -1939,6 +1959,7 @@ def test_wf_shell_cmd_3a(plugin): wf.inputs.cmd1 = "touch" wf.inputs.cmd2 = "cp" wf.inputs.args = "newfile.txt" + wf.cache_dir = tmpdir my_input_spec1 = SpecInfo( name="Input", @@ -2120,7 +2141,7 @@ def test_wf_shell_cmd_state_1(plugin): assert res.output.cp_file.parent == wf.output_dir[i] -def test_wf_shell_cmd_ndst_1(plugin): +def test_wf_shell_cmd_ndst_1(plugin, tmpdir): """ a workflow with 2 tasks and a splitter on the node level, first one has input with output_file_template (str, uses wf.lzin), that is passed to the second task @@ -2130,6 +2151,7 @@ def test_wf_shell_cmd_ndst_1(plugin): wf.inputs.cmd1 = "touch" wf.inputs.cmd2 = "cp" wf.inputs.args = ["newfile_1.txt", "newfile_2.txt"] + wf.cache_dir = tmpdir my_input_spec1 = SpecInfo( name="Input", @@ -2228,7 +2250,9 @@ def test_shell_cmd_outputspec_1(plugin, results_function, tmpdir): fields=[("newfile", File, "newfile_tmp.txt")], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" @@ -2246,14 +2270,16 @@ def test_shell_cmd_outputspec_1a(plugin, results_function, tmpdir): fields=[("newfile", attr.ib(type=File, default="newfile_tmp.txt"))], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = 
ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.newfile.exists() -def test_shell_cmd_outputspec_1b_exception(plugin): +def test_shell_cmd_outputspec_1b_exception(plugin, tmpdir): """ customised output_spec, adding files to the output, providing specific pathname """ @@ -2263,7 +2289,9 @@ def test_shell_cmd_outputspec_1b_exception(plugin): fields=[("newfile", File, "newfile_tmp_.txt")], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) with pytest.raises(Exception) as exinfo: with Submitter(plugin=plugin) as sub: @@ -2283,14 +2311,16 @@ def test_shell_cmd_outputspec_2(plugin, results_function, tmpdir): fields=[("newfile", File, "newfile_*.txt")], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" assert res.output.newfile.exists() -def test_shell_cmd_outputspec_2a_exception(plugin): +def test_shell_cmd_outputspec_2a_exception(plugin, tmpdir): """ customised output_spec, adding files to the output, using a wildcard in default @@ -2301,7 +2331,9 @@ def test_shell_cmd_outputspec_2a_exception(plugin): fields=[("newfile", File, "newfile_*K.txt")], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) with pytest.raises(Exception) as excinfo: with Submitter(plugin=plugin) as sub: @@ -2321,7 +2353,9 @@ def test_shell_cmd_outputspec_3(plugin, 
results_function, tmpdir): fields=[("newfile", File, "newfile_*.txt")], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" @@ -2347,7 +2381,9 @@ def gather_output(keyname, output_dir): fields=[("newfile", attr.ib(type=File, metadata={"callable": gather_output}))], bases=(ShellOutSpec,), ) - shelly = ShellCommandTask(name="shelly", executable=cmd, output_spec=my_output_spec) + shelly = ShellCommandTask( + name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir + ) res = results_function(shelly, plugin, tmpdir) assert res.output.stdout == "" @@ -2459,7 +2495,7 @@ def test_shell_cmd_state_outputspec_1(plugin, results_function, tmpdir): # customised output_spec for tasks in workflows -def test_shell_cmd_outputspec_wf_1(plugin): +def test_shell_cmd_outputspec_wf_1(plugin, tmpdir): """ customised output_spec for tasks within a Workflow, adding files to the output, providing specific pathname @@ -2468,6 +2504,7 @@ def test_shell_cmd_outputspec_wf_1(plugin): cmd = ["touch", "newfile_tmp.txt"] wf = Workflow(name="wf", input_spec=["cmd"]) wf.inputs.cmd = cmd + wf.cache_dir = tmpdir my_output_spec = SpecInfo( name="Output", From 6f8c3a7dff9dd26020d767ce31a007895d8f7756 Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Tue, 13 Oct 2020 22:59:39 +0800 Subject: [PATCH 09/12] add tmpdir to tests on non-nested workflow in test_workflow.py --- pydra/engine/tests/test_workflow.py | 38 +++++++++++++++++------------ 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/pydra/engine/tests/test_workflow.py b/pydra/engine/tests/test_workflow.py index c34e004d2d..7bfb35f7d4 100644 --- a/pydra/engine/tests/test_workflow.py +++ b/pydra/engine/tests/test_workflow.py @@ -3599,6 +3599,7 @@ def test_wf_lzoutall_1(plugin, 
tmpdir): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3620,6 +3621,7 @@ def test_wf_lzoutall_1a(plugin, tmpdir): wf.inputs.x = 2 wf.inputs.y = 3 wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3641,6 +3643,7 @@ def test_wf_lzoutall_st_1(plugin, tmpdir): wf.inputs.x = [2, 20] wf.inputs.y = [3, 30] wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3662,6 +3665,7 @@ def test_wf_lzoutall_st_1a(plugin, tmpdir): wf.inputs.x = [2, 20] wf.inputs.y = [3, 30] wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3690,6 +3694,7 @@ def test_wf_lzoutall_st_2(plugin, tmpdir): wf.inputs.x = [2, 20] wf.inputs.y = [3, 30] wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3714,6 +3719,7 @@ def test_wf_lzoutall_st_2a(plugin, tmpdir): wf.inputs.x = [2, 20] wf.inputs.y = [3, 30] wf.plugin = plugin + wf.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf) @@ -3731,7 +3737,7 @@ def test_wf_lzoutall_st_2a(plugin, tmpdir): def test_wf_resultfile_1(plugin, tmpdir): """ workflow with a file in the result, file should be copied to the wf dir""" - wf = Workflow(name="wf_file_1", input_spec=["x"]) + wf = Workflow(name="wf_file_1", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_write_file(name="writefile", filename=wf.lzin.x)) wf.inputs.x = "file_1.txt" wf.plugin = plugin @@ -3750,7 +3756,7 @@ def test_wf_resultfile_2(plugin, tmpdir): """ workflow with a list of files in the wf result, all files should be copied to the wf dir """ - wf = Workflow(name="wf_file_1", input_spec=["x"]) + wf = Workflow(name="wf_file_1", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_write_file_list(name="writefile", filename_list=wf.lzin.x)) file_list = ["file_1.txt", "file_2.txt", "file_3.txt"] wf.inputs.x = file_list @@ -3771,7 +3777,7 @@ def 
test_wf_resultfile_3(plugin, tmpdir): """ workflow with a dictionaries of files in the wf result, all files should be copied to the wf dir """ - wf = Workflow(name="wf_file_1", input_spec=["x"]) + wf = Workflow(name="wf_file_1", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_write_file_list2dict(name="writefile", filename_list=wf.lzin.x)) file_list = ["file_1.txt", "file_2.txt", "file_3.txt"] wf.inputs.x = file_list @@ -3794,7 +3800,7 @@ def test_wf_resultfile_3(plugin, tmpdir): def test_wf_upstream_error1(plugin, tmpdir): """ workflow with two tasks, task2 dependent on an task1 which raised an error""" - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3812,7 +3818,7 @@ def test_wf_upstream_error2(plugin, tmpdir): """ task2 dependent on task1, task1 errors, workflow-level split on task 1 goal - workflow finish running, one output errors but the other doesn't """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = [1, "hi"] # TypeError for adding str and int wf.split("x") # workflow-level split @@ -3831,7 +3837,7 @@ def test_wf_upstream_error3(plugin, tmpdir): """ task2 dependent on task1, task1 errors, task-level split on task 1 goal - workflow finish running, one output errors but the other doesn't """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = [1, "hi"] # TypeError for adding str and int wf.addvar1.split("a") # task-level split @@ -3848,7 +3854,7 @@ def test_wf_upstream_error3(plugin, tmpdir): def test_wf_upstream_error4(plugin, tmpdir): """ workflow with one task, which raises an error""" - wf = Workflow(name="wf", 
input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3863,7 +3869,7 @@ def test_wf_upstream_error4(plugin, tmpdir): def test_wf_upstream_error5(plugin, tmpdir): """ nested workflow with one task, which raises an error""" - wf_main = Workflow(name="wf_main", input_spec=["x"]) + wf_main = Workflow(name="wf_main", input_spec=["x"], cache_dir=tmpdir) wf = Workflow(name="wf", input_spec=["x"], x=wf_main.lzin.x) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.plugin = plugin @@ -3883,7 +3889,7 @@ def test_wf_upstream_error5(plugin, tmpdir): def test_wf_upstream_error6(plugin, tmpdir): """ nested workflow with two tasks, the first one raises an error""" - wf_main = Workflow(name="wf_main", input_spec=["x"]) + wf_main = Workflow(name="wf_main", input_spec=["x"], cache_dir=tmpdir) wf = Workflow(name="wf", input_spec=["x"], x=wf_main.lzin.x) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.add(fun_addvar_default(name="addvar2", a=wf.addvar1.lzout.out)) @@ -3907,7 +3913,7 @@ def test_wf_upstream_error7(plugin, tmpdir): workflow with three sequential tasks, the first task raises an error the last task is set as the workflow output """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3929,7 +3935,7 @@ def test_wf_upstream_error7a(plugin, tmpdir): workflow with three sequential tasks, the first task raises an error the second task is set as the workflow output """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3951,7 
+3957,7 @@ def test_wf_upstream_error7b(plugin, tmpdir): workflow with three sequential tasks, the first task raises an error the second and the third tasks are set as the workflow output """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3970,7 +3976,7 @@ def test_wf_upstream_error7b(plugin, tmpdir): def test_wf_upstream_error8(plugin, tmpdir): """ workflow with three tasks, the first one raises an error, so 2 others are removed""" - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = "hi" # TypeError for adding str and int wf.plugin = plugin @@ -3994,7 +4000,7 @@ def test_wf_upstream_error9(plugin, tmpdir): one branch has an error, the second is fine the errored branch is connected to the workflow output """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = 2 wf.add(fun_addvar(name="err", a=wf.addvar1.lzout.out, b="hi")) @@ -4021,7 +4027,7 @@ def test_wf_upstream_error9a(plugin, tmpdir): the branch without error is connected to the workflow output so the workflow finished clean """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = 2 wf.add(fun_addvar(name="err", a=wf.addvar1.lzout.out, b="hi")) @@ -4044,7 +4050,7 @@ def test_wf_upstream_error9b(plugin, tmpdir): one branch has an error, the second is fine both branches are connected to the workflow output """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) 
wf.add(fun_addvar_default(name="addvar1", a=wf.lzin.x)) wf.inputs.x = 2 wf.add(fun_addvar(name="err", a=wf.addvar1.lzout.out, b="hi")) From 1b973003f15d7f60ef262d7adbe447a31a7b2c5a Mon Sep 17 00:00:00 2001 From: Nicol Lo Date: Wed, 28 Oct 2020 10:30:10 +0800 Subject: [PATCH 10/12] mark test_slurm_cancel_rerun tests in test_submitter as flaky --- pydra/engine/tests/test_boutiques.py | 7 +- pydra/engine/tests/test_shelltask.py | 228 ++++++++++++++++++--------- pydra/engine/tests/utils.py | 5 +- 3 files changed, 164 insertions(+), 76 deletions(-) diff --git a/pydra/engine/tests/test_boutiques.py b/pydra/engine/tests/test_boutiques.py index 5399b2e88a..6e503698dc 100644 --- a/pydra/engine/tests/test_boutiques.py +++ b/pydra/engine/tests/test_boutiques.py @@ -31,7 +31,8 @@ def test_boutiques_1(maskfile, plugin, results_function, tmpdir): btask = BoshTask(name="NA", zenodo_id="1482743") btask.inputs.infile = Infile btask.inputs.maskfile = maskfile - res = results_function(btask, plugin, tmpdir) + btask.cache_dir = tmpdir + res = results_function(btask, plugin) assert res.output.return_code == 0 @@ -102,6 +103,7 @@ def test_boutiques_wf_1(maskfile, plugin): wf = Workflow(name="wf", input_spec=["maskfile", "infile"]) wf.inputs.maskfile = maskfile wf.inputs.infile = Infile + wf.cache_dir = tmpdir wf.add( BoshTask( @@ -128,11 +130,12 @@ def test_boutiques_wf_1(maskfile, plugin): @pytest.mark.parametrize( "maskfile", ["test_brain.nii.gz", "test_brain", "test_brain.nii"] ) -def test_boutiques_wf_2(maskfile, plugin): +def test_boutiques_wf_2(maskfile, plugin, tmdpir): """ wf with two BoshTasks (fsl.bet and fsl.stats) and one ShellTask""" wf = Workflow(name="wf", input_spec=["maskfile", "infile"]) wf.inputs.maskfile = maskfile wf.inputs.infile = Infile + wf.cache_dir = tmpdir wf.add( BoshTask( diff --git a/pydra/engine/tests/test_shelltask.py b/pydra/engine/tests/test_shelltask.py index bf93cbd085..e270fbbd1e 100644 --- a/pydra/engine/tests/test_shelltask.py +++ 
b/pydra/engine/tests/test_shelltask.py @@ -20,10 +20,11 @@ def test_shell_cmd_1(plugin_dask_opt, results_function, tmpdir): """ simple command, no arguments """ cmd = ["pwd"] - shelly = ShellCommandTask(name="shelly", executable=cmd) + shelly = ShellCommandTask(name="shelly", executable=cmd, cache_dir=tmpdir) + shelly.cache_dir = tmpdir assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin=plugin_dask_opt, tmpdir=tmpdir) + res = results_function(shelly, plugin=plugin_dask_opt) assert Path(res.output.stdout.rstrip()) == shelly.output_dir assert res.output.return_code == 0 assert res.output.stderr == "" @@ -36,9 +37,10 @@ def test_shell_cmd_1_strip(plugin, results_function, tmpdir): """ cmd = ["pwd"] shelly = ShellCommandTask(name="shelly", executable=cmd, strip=True) + shelly.cache_dir = tmpdir assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert Path(res.output.stdout) == Path(shelly.output_dir) assert res.output.return_code == 0 assert res.output.stderr == "" @@ -49,9 +51,10 @@ def test_shell_cmd_2(plugin, results_function, tmpdir): """ a command with arguments, cmd and args given as executable """ cmd = ["echo", "hail", "pydra"] shelly = ShellCommandTask(name="shelly", executable=cmd) + shelly.cache_dir = tmpdir assert shelly.cmdline == " ".join(cmd) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout.strip() == " ".join(cmd[1:]) assert res.output.return_code == 0 assert res.output.stderr == "" @@ -64,10 +67,11 @@ def test_shell_cmd_2a(plugin, results_function, tmpdir): cmd_args = ["hail", "pydra"] # separate command into exec + args shelly = ShellCommandTask(name="shelly", executable=cmd_exec, args=cmd_args) + shelly.cache_dir = tmpdir assert shelly.inputs.executable == "echo" assert shelly.cmdline == "echo " + " ".join(cmd_args) - res = results_function(shelly, plugin, tmpdir) + res = 
results_function(shelly, plugin) assert res.output.stdout.strip() == " ".join(cmd_args) assert res.output.return_code == 0 assert res.output.stderr == "" @@ -80,10 +84,11 @@ def test_shell_cmd_2b(plugin, results_function, tmpdir): cmd_args = "pydra" # separate command into exec + args shelly = ShellCommandTask(name="shelly", executable=cmd_exec, args=cmd_args) + shelly.cache_dir = tmpdir assert shelly.inputs.executable == "echo" assert shelly.cmdline == "echo pydra" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "pydra\n" assert res.output.return_code == 0 assert res.output.stderr == "" @@ -290,12 +295,14 @@ def test_shell_cmd_inputspec_1(plugin, results_function, use_validator, tmpdir): args=cmd_args, opt_n=cmd_opt, input_spec=my_input_spec, + cache_dir=tmpdir, ) + shelly.cache_dir = tmpdir assert shelly.inputs.executable == cmd_exec assert shelly.inputs.args == cmd_args assert shelly.cmdline == "echo -n hello from pydra" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "hello from pydra" @@ -338,11 +345,12 @@ def test_shell_cmd_inputspec_2(plugin, results_function, use_validator, tmpdir): opt_n=cmd_opt, opt_hello=cmd_opt_hello, input_spec=my_input_spec, + cache_dir=tmpdir, ) assert shelly.inputs.executable == cmd_exec assert shelly.inputs.args == cmd_args assert shelly.cmdline == "echo -n HELLO from pydra" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "HELLO from pydra" @@ -372,11 +380,15 @@ def test_shell_cmd_inputspec_3(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, text=hello, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + text=hello, + input_spec=my_input_spec, + cache_dir=tmpdir, ) assert shelly.inputs.executable == cmd_exec assert 
shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "HELLO\n" @@ -401,11 +413,15 @@ def test_shell_cmd_inputspec_3a(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, text=hello, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + text=hello, + input_spec=my_input_spec, + cache_dir=tmpdir, ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "HELLO\n" @@ -435,13 +451,13 @@ def test_shell_cmd_inputspec_3b(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) shelly.inputs.text = hello assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "HELLO\n" @@ -468,9 +484,8 @@ def test_shell_cmd_inputspec_3c_exception(plugin, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) - shelly.cache_dir = tmpdir with pytest.raises(Exception) as excinfo: shelly() @@ -503,13 +518,12 @@ def test_shell_cmd_inputspec_3c(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) - shelly.cache_dir = tmpdir assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo" - res = 
results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "\n" @@ -534,14 +548,13 @@ def test_shell_cmd_inputspec_4(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) - shelly.cache_dir = tmpdir assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hello" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "Hello\n" @@ -561,13 +574,13 @@ def test_shell_cmd_inputspec_4a(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hello" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "Hello\n" @@ -592,13 +605,13 @@ def test_shell_cmd_inputspec_4b(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, input_spec=my_input_spec + name="shelly", executable=cmd_exec, input_spec=my_input_spec, cache_dir=tmpdir ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "echo Hi" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "Hi\n" @@ -708,14 +721,18 @@ def test_shell_cmd_inputspec_5_nosubm(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, opt_t=cmd_t, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + opt_t=cmd_t, + input_spec=my_input_spec, + cache_dir=tmpdir, ) 
assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -t" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) -def test_shell_cmd_inputspec_5a_exception(plugin): +def test_shell_cmd_inputspec_5a_exception(plugin, tmpdir): """ checking xor in metadata: both options are True, so the task raises exception""" cmd_exec = "ls" cmd_t = True @@ -757,6 +774,7 @@ def test_shell_cmd_inputspec_5a_exception(plugin): opt_t=cmd_t, opt_S=cmd_S, input_spec=my_input_spec, + cache_dir=tmpdir, ) with pytest.raises(Exception) as excinfo: shelly() @@ -807,7 +825,7 @@ def test_shell_cmd_inputspec_6(plugin, results_function, tmpdir): ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -l -t" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) def test_shell_cmd_inputspec_6a_exception(plugin): @@ -891,11 +909,12 @@ def test_shell_cmd_inputspec_6b(plugin, results_function, tmpdir): opt_t=cmd_t, # opt_l=cmd_l, input_spec=my_input_spec, + cache_dir=tmpdir, ) shelly.inputs.opt_l = cmd_l assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -l -t" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) @pytest.mark.parametrize("results_function", [result_no_submitter, result_submitter]) @@ -925,10 +944,14 @@ def test_shell_cmd_inputspec_7(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, args=args, input_spec=my_input_spec + name="shelly", + executable=cmd, + args=args, + input_spec=my_input_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1.exists() # checking if the file is created in a good place @@ -965,10 +988,14 @@ def test_shell_cmd_inputspec_7a(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, args=args, 
input_spec=my_input_spec + name="shelly", + executable=cmd, + args=args, + input_spec=my_input_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1_changed.exists() # checking if the file is created in a good place @@ -1013,9 +1040,10 @@ def test_shell_cmd_inputspec_7b(plugin, results_function, tmpdir): executable=cmd, newfile="newfile_tmp.txt", input_spec=my_input_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1069,9 +1097,10 @@ def test_shell_cmd_inputspec_8(plugin, results_function, tmpdir): newfile="newfile_tmp.txt", time="02121010", input_spec=my_input_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1125,9 +1154,10 @@ def test_shell_cmd_inputspec_8a(plugin, results_function, tmpdir): newfile="newfile_tmp.txt", time="02121010", input_spec=my_input_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1.exists() @@ -1168,10 +1198,14 @@ def test_shell_cmd_inputspec_9(tmpdir, plugin, results_function): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, input_spec=my_input_spec, file_orig=file + name="shelly", + executable=cmd, + input_spec=my_input_spec, + file_orig=file, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.file_copy.exists() assert res.output.file_copy.name == "file_copy.txt" @@ -1216,10 +1250,14 @@ def test_shell_cmd_inputspec_9a(tmpdir, plugin, results_function): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, 
input_spec=my_input_spec, file_orig=file + name="shelly", + executable=cmd, + input_spec=my_input_spec, + file_orig=file, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.file_copy.exists() assert res.output.file_copy.name == "file_copy" @@ -1263,10 +1301,14 @@ def test_shell_cmd_inputspec_9b(tmpdir, plugin, results_function): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, input_spec=my_input_spec, file_orig=file + name="shelly", + executable=cmd, + input_spec=my_input_spec, + file_orig=file, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.file_copy.exists() assert res.output.file_copy.name == "file" @@ -1308,11 +1350,15 @@ def test_shell_cmd_inputspec_10(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, files=files_list, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + files=files_list, + input_spec=my_input_spec, + cache_dir=tmpdir, ) assert shelly.inputs.executable == cmd_exec - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "hello from boston" @@ -1359,10 +1405,14 @@ def test_shell_cmd_inputspec_copyfile_1(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=str(file) + name="shelly", + executable=cmd, + input_spec=my_input_spec, + orig_file=str(file), + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is copied, and than it is changed in place @@ -1417,10 +1467,14 @@ def test_shell_cmd_inputspec_copyfile_1a(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - 
name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=str(file) + name="shelly", + executable=cmd, + input_spec=my_input_spec, + orig_file=str(file), + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is uses a soft link, but it creates and an extra copy before modifying @@ -1489,10 +1543,14 @@ def test_shell_cmd_inputspec_copyfile_1b(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=str(file) + name="shelly", + executable=cmd, + input_spec=my_input_spec, + orig_file=str(file), + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out_file.exists() # the file is not copied, it is changed in place @@ -1527,12 +1585,16 @@ def test_shell_cmd_inputspec_state_1(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, text=hello, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + text=hello, + input_spec=my_input_spec, + cache_dir=tmpdir, ).split("text") assert shelly.inputs.executable == cmd_exec # todo: this doesn't work when state # assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res[0].output.stdout == "HELLO\n" assert res[1].output.stdout == "hi\n" @@ -1602,11 +1664,15 @@ def test_shell_cmd_inputspec_state_1a(plugin, results_function, tmpdir): # separate command into exec + args shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, text=hello, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + text=hello, + input_spec=my_input_spec, + cache_dir=tmpdir, ).split("text") assert shelly.inputs.executable == cmd_exec - res = 
results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res[0].output.stdout == "HELLO\n" assert res[1].output.stdout == "hi\n" @@ -1637,10 +1703,14 @@ def test_shell_cmd_inputspec_state_2(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, args=args, input_spec=my_input_spec + name="shelly", + executable=cmd, + args=args, + input_spec=my_input_spec, + cache_dir=tmpdir, ).split("args") - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) for i in range(len(args)): assert res[i].output.stdout == "" assert res[i].output.out1.exists() @@ -1681,13 +1751,17 @@ def test_shell_cmd_inputspec_state_3(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd_exec, file=files, input_spec=my_input_spec + name="shelly", + executable=cmd_exec, + file=files, + input_spec=my_input_spec, + cache_dir=tmpdir, ).split("file") assert shelly.inputs.executable == cmd_exec # todo: this doesn't work when state # assert shelly.cmdline == "echo HELLO" - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res[0].output.stdout == "hello from pydra" assert res[1].output.stdout == "have a nice one" @@ -1738,11 +1812,15 @@ def test_shell_cmd_inputspec_copyfile_state_1(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, input_spec=my_input_spec, orig_file=files + name="shelly", + executable=cmd, + input_spec=my_input_spec, + orig_file=files, + cache_dir=tmpdir, ).split("orig_file") txt_l = ["from pydra", "world"] - res_l = results_function(shelly, plugin, tmpdir) + res_l = results_function(shelly, plugin) for i, res in enumerate(res_l): assert res.output.stdout == "" assert res.output.out_file.exists() @@ -2254,7 +2332,7 @@ def test_shell_cmd_outputspec_1(plugin, results_function, tmpdir): name="shelly", executable=cmd, output_spec=my_output_spec, 
cache_dir=tmpdir ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.newfile.exists() @@ -2274,7 +2352,7 @@ def test_shell_cmd_outputspec_1a(plugin, results_function, tmpdir): name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.newfile.exists() @@ -2315,7 +2393,7 @@ def test_shell_cmd_outputspec_2(plugin, results_function, tmpdir): name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.newfile.exists() @@ -2357,7 +2435,7 @@ def test_shell_cmd_outputspec_3(plugin, results_function, tmpdir): name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" # newfile is a list assert len(res.output.newfile) == 2 @@ -2385,7 +2463,7 @@ def gather_output(keyname, output_dir): name="shelly", executable=cmd, output_spec=my_output_spec, cache_dir=tmpdir ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" # newfile is a list assert len(res.output.newfile) == 2 @@ -2419,10 +2497,14 @@ def test_shell_cmd_outputspec_5(plugin, results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, args=args, output_spec=my_output_spec + name="shelly", + executable=cmd, + args=args, + output_spec=my_output_spec, + cache_dir=tmpdir, ) - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) assert res.output.stdout == "" assert res.output.out1.exists() @@ -2483,10 +2565,14 @@ def test_shell_cmd_state_outputspec_1(plugin, 
results_function, tmpdir): ) shelly = ShellCommandTask( - name="shelly", executable=cmd, args=args, output_spec=my_output_spec + name="shelly", + executable=cmd, + args=args, + output_spec=my_output_spec, + cache_dir=tmpdir, ).split("args") - res = results_function(shelly, plugin, tmpdir) + res = results_function(shelly, plugin) for i in range(len(args)): assert res[i].output.stdout == "" assert res[i].output.out1.exists() diff --git a/pydra/engine/tests/utils.py b/pydra/engine/tests/utils.py index edc2533c28..b2fbdab762 100644 --- a/pydra/engine/tests/utils.py +++ b/pydra/engine/tests/utils.py @@ -23,16 +23,15 @@ ) -def result_no_submitter(shell_task, plugin=None, tmpdir=None): +def result_no_submitter(shell_task, plugin=None): """ helper function to return result when running without submitter """ return shell_task() -def result_submitter(shell_task, plugin, tmpdir): +def result_submitter(shell_task, plugin): """ helper function to return result when running with submitter with specific plugin """ - shell_task.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: shell_task(submitter=sub) return shell_task.result() From 56785abab70471e75eb3e0a6f5627b6a4e022456 Mon Sep 17 00:00:00 2001 From: Dorota Jarecka Date: Sun, 8 Nov 2020 19:45:55 -0500 Subject: [PATCH 11/12] fixing bosh tests --- pydra/engine/tests/test_boutiques.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydra/engine/tests/test_boutiques.py b/pydra/engine/tests/test_boutiques.py index 6e503698dc..4f6665bac3 100644 --- a/pydra/engine/tests/test_boutiques.py +++ b/pydra/engine/tests/test_boutiques.py @@ -98,7 +98,7 @@ def test_boutiques_spec_2(): @pytest.mark.parametrize( "maskfile", ["test_brain.nii.gz", "test_brain", "test_brain.nii"] ) -def test_boutiques_wf_1(maskfile, plugin): +def test_boutiques_wf_1(maskfile, plugin, tmpdir): """ wf with one task that runs fsl.bet using BoshTask""" wf = Workflow(name="wf", input_spec=["maskfile", "infile"]) wf.inputs.maskfile = 
maskfile @@ -130,7 +130,7 @@ def test_boutiques_wf_1(maskfile, plugin): @pytest.mark.parametrize( "maskfile", ["test_brain.nii.gz", "test_brain", "test_brain.nii"] ) -def test_boutiques_wf_2(maskfile, plugin, tmdpir): +def test_boutiques_wf_2(maskfile, plugin, tmpdir): """ wf with two BoshTasks (fsl.bet and fsl.stats) and one ShellTask""" wf = Workflow(name="wf", input_spec=["maskfile", "infile"]) wf.inputs.maskfile = maskfile From 7669b814f7d7a3618c08f19a00e2d864fa223ad8 Mon Sep 17 00:00:00 2001 From: Dorota Jarecka Date: Tue, 10 Nov 2020 18:43:26 -0500 Subject: [PATCH 12/12] fixing cache_dir settings for workflow used as a node; addressing issues with submitter/slurm that the task might finish, but the results are not available right away (repeating request for available tasks for another 60s); fixing some tests so they use tmpdir --- pydra/engine/submitter.py | 21 ++++- pydra/engine/tests/test_shelltask.py | 4 +- pydra/engine/tests/test_submitter.py | 1 - pydra/engine/tests/test_workflow.py | 120 ++++++++++++++++++++++----- 4 files changed, 118 insertions(+), 28 deletions(-) diff --git a/pydra/engine/submitter.py b/pydra/engine/submitter.py index cd8c61b84b..56d711de1c 100644 --- a/pydra/engine/submitter.py +++ b/pydra/engine/submitter.py @@ -1,5 +1,6 @@ """Handle execution backends.""" import asyncio +import time from .workers import SerialWorker, ConcurrentFuturesWorker, SlurmWorker, DaskWorker from .core import is_workflow from .helpers import get_open_loop, load_and_run_async @@ -150,15 +151,31 @@ async def _run_workflow(self, wf, rerun=False): The computed workflow """ + for nd in wf.graph.nodes: + if nd.allow_cache_override: + nd.cache_dir = wf.cache_dir + # creating a copy of the graph that will be modified # the copy contains new lists with original runnable objects graph_copy = wf.graph.copy() # keep track of pending futures task_futures = set() tasks, tasks_follow_errored = get_runnable_tasks(graph_copy) - while tasks or len(task_futures): + while tasks 
or task_futures or graph_copy.nodes: if not tasks and not task_futures: - raise Exception("Nothing queued or todo - something went wrong") + # it's possible that task_futures is empty, but not able to get any + # tasks from graph_copy (using get_runnable_tasks) + # this might be related to some delays saving the files + # so try to get_runnable_tasks for another minute + ii = 0 + while not tasks and graph_copy.nodes: + tasks, follow_err = get_runnable_tasks(graph_copy) + ii += 1 + time.sleep(1) + if ii > 60: + raise Exception( + "graph is not empty, but not able to get more tasks - something is wrong (e.g. with the filesystem)" + ) for task in tasks: # grab inputs if needed logger.debug(f"Retrieving inputs for {task}") diff --git a/pydra/engine/tests/test_shelltask.py b/pydra/engine/tests/test_shelltask.py index 31ce8f32c9..bbcf637cb7 100644 --- a/pydra/engine/tests/test_shelltask.py +++ b/pydra/engine/tests/test_shelltask.py @@ -21,7 +21,6 @@ def test_shell_cmd_1(plugin_dask_opt, results_function, tmpdir): """ simple command, no arguments """ cmd = ["pwd"] shelly = ShellCommandTask(name="shelly", executable=cmd, cache_dir=tmpdir) - shelly.cache_dir = tmpdir assert shelly.cmdline == " ".join(cmd) res = results_function(shelly, plugin=plugin_dask_opt) @@ -297,7 +296,6 @@ def test_shell_cmd_inputspec_1(plugin, results_function, use_validator, tmpdir): input_spec=my_input_spec, cache_dir=tmpdir, ) - shelly.cache_dir = tmpdir assert shelly.inputs.executable == cmd_exec assert shelly.inputs.args == cmd_args assert shelly.cmdline == "echo -n hello from pydra" @@ -822,6 +820,7 @@ def test_shell_cmd_inputspec_6(plugin, results_function, tmpdir): opt_t=cmd_t, opt_l=cmd_l, input_spec=my_input_spec, + cache_dir=tmpdir, ) assert shelly.inputs.executable == cmd_exec assert shelly.cmdline == "ls -l -t" @@ -1400,6 +1399,7 @@ def test_shell_cmd_inputspec_10_err(tmpdir): shelly = ShellCommandTask( name="shelly", executable=cmd_exec, files=file_2, input_spec=my_input_spec ) + 
shelly.cache_dir = tmpdir with pytest.raises(AttributeError) as e: res = shelly() diff --git a/pydra/engine/tests/test_submitter.py b/pydra/engine/tests/test_submitter.py index 0b54cb5893..29e37a3c57 100644 --- a/pydra/engine/tests/test_submitter.py +++ b/pydra/engine/tests/test_submitter.py @@ -120,7 +120,6 @@ def test_wf2(plugin_dask_opt, tmpdir): wfnd.add(sleep_add_one(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = 2 - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) diff --git a/pydra/engine/tests/test_workflow.py b/pydra/engine/tests/test_workflow.py index 7bfb35f7d4..ecf84e5091 100644 --- a/pydra/engine/tests/test_workflow.py +++ b/pydra/engine/tests/test_workflow.py @@ -50,7 +50,7 @@ def test_wf_name_conflict2(): def test_wf_no_output(plugin, tmpdir): """ Raise error when output isn't set with set_output""" - wf = Workflow(name="wf_1", input_spec=["x"]) + wf = Workflow(name="wf_1", input_spec=["x"], cache_dir=tmpdir) wf.add(add2(name="add2", x=wf.lzin.x)) wf.inputs.x = 2 @@ -1713,7 +1713,6 @@ def test_wfasnd_1(plugin, tmpdir): wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = 2 - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1739,7 +1738,6 @@ def test_wfasnd_wfinp_1(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) - wfnd.cache_dir = tmpdir wf.add(wfnd) wf.inputs.x = 2 @@ -1767,7 +1765,6 @@ def test_wfasnd_wfndupdate(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=2) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"], x=3) wfnd.inputs.x = wf.lzin.x @@ -1821,6 +1818,7 @@ def test_wfasnd_wfndupdate_rerun(plugin, tmpdir): wf_o.add(wf) 
wf_o.set_output([("out", wf_o.wf.lzout.out)]) wf_o.plugin = plugin + wf_o.cache_dir = tmpdir with Submitter(plugin=plugin) as sub: sub(wf_o) @@ -1840,7 +1838,6 @@ def test_wfasnd_st_1(plugin, tmpdir): wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.split("x") wfnd.inputs.x = [2, 4] - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1868,7 +1865,6 @@ def test_wfasnd_st_updatespl_1(plugin, tmpdir): wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) wfnd.inputs.x = [2, 4] - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1897,7 +1893,6 @@ def test_wfasnd_ndst_1(plugin, tmpdir): # TODO: without this the test is failing wfnd.plugin = plugin wfnd.inputs.x = [2, 4] - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1925,7 +1920,6 @@ def test_wfasnd_ndst_updatespl_1(plugin, tmpdir): # TODO: without this the test is failing wfnd.plugin = plugin wfnd.inputs.x = [2, 4] - wfnd.cache_dir = tmpdir wf = Workflow(name="wf", input_spec=["x"]) wf.add(wfnd) @@ -1948,11 +1942,10 @@ def test_wfasnd_wfst_1(plugin, tmpdir): workflow-node with one task, splitter for the main workflow """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) - wfnd.cache_dir = tmpdir wf.add(wfnd) wf.split("x") @@ -1986,7 +1979,6 @@ def test_wfasnd_st_2(plugin, tmpdir): wfnd.split(("x", "y")) wfnd.inputs.x = [2, 4] wfnd.inputs.y = [1, 10] - wfnd.cache_dir = tmpdir wf = Workflow(name="wf_st_3", input_spec=["x", "y"]) wf.add(wfnd) @@ -2013,7 +2005,6 @@ def test_wfasnd_wfst_2(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x", "y"], x=wf.lzin.x, y=wf.lzin.y) wfnd.add(multiply(name="mult", x=wfnd.lzin.x, y=wfnd.lzin.y)) wfnd.set_output([("out", 
wfnd.mult.lzout.out)]) - wfnd.cache_dir = tmpdir wf.add(wfnd) wf.add(add2(name="add2", x=wf.wfnd.lzout.out)) @@ -2052,7 +2043,6 @@ def test_wfasnd_ndst_3(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) - wfnd.cache_dir = tmpdir wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) @@ -2082,7 +2072,6 @@ def test_wfasnd_wfst_3(plugin, tmpdir): wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) - wfnd.cache_dir = tmpdir wf.add(wfnd) wf.set_output([("out", wf.wfnd.lzout.out)]) @@ -2101,6 +2090,91 @@ def test_wfasnd_wfst_3(plugin, tmpdir): assert odir.exists() +# workflows with structures wfns(A->B) + + +def test_wfasnd_4(plugin, tmpdir): + """ workflow as a node + workflow-node with two tasks and no splitter + """ + wfnd = Workflow(name="wfnd", input_spec=["x"]) + wfnd.add(add2(name="add2_1st", x=wfnd.lzin.x)) + wfnd.add(add2(name="add2_2nd", x=wfnd.add2_1st.lzout.out)) + wfnd.set_output([("out", wfnd.add2_2nd.lzout.out)]) + wfnd.inputs.x = 2 + + wf = Workflow(name="wf", input_spec=["x"]) + wf.add(wfnd) + wf.set_output([("out", wf.wfnd.lzout.out)]) + wf.plugin = plugin + wf.cache_dir = tmpdir + + with Submitter(plugin=plugin) as sub: + sub(wf) + + results = wf.result() + assert results.output.out == 6 + # checking the output directory + assert wf.output_dir.exists() + + +def test_wfasnd_ndst_4(plugin, tmpdir): + """ workflow as a node + workflow-node with two tasks, + splitter for node + """ + wfnd = Workflow(name="wfnd", input_spec=["x"]) + wfnd.add(add2(name="add2_1st", x=wfnd.lzin.x).split("x")) + wfnd.add(add2(name="add2_2nd", x=wfnd.add2_1st.lzout.out)) + wfnd.set_output([("out", wfnd.add2_2nd.lzout.out)]) + # TODO: without this the test is failing + wfnd.plugin = plugin + wfnd.inputs.x = [2, 4] + + wf = Workflow(name="wf", 
input_spec=["x"]) + wf.add(wfnd) + wf.set_output([("out", wf.wfnd.lzout.out)]) + wf.plugin = plugin + wf.cache_dir = tmpdir + + with Submitter(plugin=plugin) as sub: + sub(wf) + + results = wf.result() + assert results.output.out == [6, 8] + # checking the output directory + assert wf.output_dir.exists() + + +def test_wfasnd_wfst_4(plugin, tmpdir): + """ workflow as a node + workflow-node with two tasks, + splitter for the main workflow + """ + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) + wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) + wfnd.add(add2(name="add2_1st", x=wfnd.lzin.x)) + wfnd.add(add2(name="add2_2nd", x=wfnd.add2_1st.lzout.out)) + wfnd.set_output([("out", wfnd.add2_2nd.lzout.out)]) + + wf.add(wfnd) + wf.split("x") + wf.inputs.x = [2, 4] + wf.set_output([("out", wf.wfnd.lzout.out)]) + wf.plugin = plugin + + with Submitter(plugin=plugin) as sub: + sub(wf) + # assert wf.output_dir.exists() + results = wf.result() + assert results[0].output.out == 6 + assert results[1].output.out == 8 + # checking all directories + assert wf.output_dir + for odir in wf.output_dir: + assert odir.exists() + + # Testing caching @@ -4098,7 +4172,7 @@ def exporting_graphs(wf, name): def test_graph_1(tmpdir): """creating a set of graphs, wf with two nodes""" - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult_1", x=wf.lzin.x, y=wf.lzin.y)) wf.add(multiply(name="mult_2", x=wf.lzin.x, y=wf.lzin.x)) wf.add(add2(name="add2", x=wf.mult_1.lzout.out)) @@ -4139,7 +4213,7 @@ def test_graph_1st(tmpdir): """creating a set of graphs, wf with two nodes some nodes have splitters, should be marked with blue color """ - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult_1", x=wf.lzin.x, y=wf.lzin.y).split("x")) wf.add(multiply(name="mult_2", x=wf.lzin.x, y=wf.lzin.x)) 
wf.add(add2(name="add2", x=wf.mult_1.lzout.out)) @@ -4180,7 +4254,7 @@ def test_graph_1st_cmb(tmpdir): the first one has a splitter, the second has a combiner, so the third one is stateless first two nodes should be blue and the arrow between them should be blue """ - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split("x")) wf.add(add2(name="add2", x=wf.mult.lzout.out).combine("mult.x")) wf.add(list_sum(name="sum", x=wf.add2.lzout.out)) @@ -4219,7 +4293,7 @@ def test_graph_1st_cmb(tmpdir): def test_graph_2(tmpdir): """creating a graph, wf with one worfklow as a node""" - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) @@ -4253,7 +4327,7 @@ def test_graph_2st(tmpdir): """creating a set of graphs, wf with one worfklow as a node the inner workflow has a state, so should be blue """ - wf = Workflow(name="wf", input_spec=["x"]) + wf = Workflow(name="wf", input_spec=["x"], cache_dir=tmpdir) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x).split("x") wfnd.add(add2(name="add2", x=wfnd.lzin.x)) wfnd.set_output([("out", wfnd.add2.lzout.out)]) @@ -4287,7 +4361,7 @@ def test_graph_2st(tmpdir): def test_graph_3(tmpdir): """creating a set of graphs, wf with two nodes (one node is a workflow)""" - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) @@ -4329,7 +4403,7 @@ def test_graph_3st(tmpdir): the first node has a state and it should be passed to the second node (blue node and a wfasnd, and blue arrow from the node to the wfasnd) """ - wf = 
Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y).split("x")) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) @@ -4370,7 +4444,7 @@ def test_graph_4(tmpdir): """creating a set of graphs, wf with two nodes (one node is a workflow with two nodes inside). Connection from the node to the inner workflow. """ - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y)) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.mult.lzout.out) wfnd.add(add2(name="add2_a", x=wfnd.lzin.x)) @@ -4413,7 +4487,7 @@ def test_graph_5(tmpdir): """creating a set of graphs, wf with two nodes (one node is a workflow with two nodes inside). Connection from the inner workflow to the node. """ - wf = Workflow(name="wf", input_spec=["x", "y"]) + wf = Workflow(name="wf", input_spec=["x", "y"], cache_dir=tmpdir) wfnd = Workflow(name="wfnd", input_spec=["x"], x=wf.lzin.x) wfnd.add(add2(name="add2_a", x=wfnd.lzin.x)) wfnd.add(add2(name="add2_b", x=wfnd.add2_a.lzout.out))