Merge pull request #464 from satra/sty/black
Style update files with latest release of black
satra authored Apr 30, 2021
2 parents 0027552 + 9c3b20e commit a9224dd
Showing 30 changed files with 865 additions and 870 deletions.
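Almost every hunk below is the same mechanical edit: black releases after 19.3b0 normalize docstrings, dropping the space after the opening quotes and re-indenting continuation lines to the enclosing block. A minimal before/after sketch of that rule on a hypothetical function (not code from this diff):

    # as black 19.3b0 left it (docstrings untouched)
    def example():
        """ summary line
            continuation line
        """

    # as black 21.4b2 rewrites it
    def example():
        """summary line
        continuation line
        """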
2 changes: 1 addition & 1 deletion .github/workflows/teststyle.yml
@@ -25,5 +25,5 @@ jobs:
 
     - name: Check Style
       run: |
-        pip install black==19.3b0 codecov
+        pip install black==21.4b2 codecov
         black --check pydra setup.py
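Note: black --check only reports, it never rewrites files; it exits non-zero when any file would be reformatted, which is what fails this job. The pin here therefore has to match the pre-commit hook below, or CI and local formatting drift apart.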
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -9,6 +9,6 @@ repos:
   - id: check-yaml
   - id: check-added-large-files
 - repo: https://github.com/psf/black
-  rev: 19.3b0
+  rev: 21.4b2
   hooks:
   - id: black
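With both pins moved to 21.4b2, the tree is reformatted once so the CI check and the local hook agree. A sketch of how the reformat could be reproduced locally (the black invocation mirrors the CI step above; pre-commit run --all-files is the tool's standard whole-repo invocation, not a command taken from this commit):

    pip install black==21.4b2
    black pydra setup.py                # rewrites files in place
    pre-commit run black --all-files    # same, via the updated hook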
8 changes: 4 additions & 4 deletions pydra/engine/boutiques.py
@@ -107,8 +107,8 @@ def _download_spec(self, zenodo_id):
         return zenodo_file
 
     def _prepare_input_spec(self, names_subset=None):
-        """ creating input spec from the zenodo file
-            if name_subset provided, only names from the subset will be used in the spec
+        """creating input spec from the zenodo file
+        if name_subset provided, only names from the subset will be used in the spec
         """
         binputs = self.bosh_spec["inputs"]
         self._input_spec_keys = {}
@@ -148,8 +148,8 @@ def _prepare_input_spec(self, names_subset=None):
         return spec
 
     def _prepare_output_spec(self, names_subset=None):
-        """ creating output spec from the zenodo file
-            if name_subset provided, only names from the subset will be used in the spec
+        """creating output spec from the zenodo file
+        if name_subset provided, only names from the subset will be used in the spec
         """
         boutputs = self.bosh_spec["output-files"]
         fields = []
38 changes: 19 additions & 19 deletions pydra/engine/core.py
@@ -238,10 +238,10 @@ def errored(self):
 
     @property
     def checksum(self):
-        """ Calculates the unique checksum of the task.
-            Used to create specific directory name for task that are run;
-            and to create nodes checksums needed for graph checkums
-            (before the tasks have inputs etc.)
+        """Calculates the unique checksum of the task.
+        Used to create specific directory name for task that are run;
+        and to create nodes checksums needed for graph checkums
+        (before the tasks have inputs etc.)
         """
         input_hash = self.inputs.hash
         if self.state is None:
@@ -305,9 +305,9 @@ def checksum_states(self, state_index=None):
 
     @property
     def uid(self):
-        """ the unique id number for the task
-            It will be used to create unique names for slurm scripts etc.
-            without a need to run checksum
+        """the unique id number for the task
+        It will be used to create unique names for slurm scripts etc.
+        without a need to run checksum
         """
         return self._uid
 
@@ -334,16 +334,16 @@ def set_state(self, splitter, combiner=None):
     @property
     def output_names(self):
         """Get the names of the outputs from the task's output_spec
-            (not everything has to be generated, see generated_output_names).
+        (not everything has to be generated, see generated_output_names).
         """
         return [f.name for f in attr.fields(make_klass(self.output_spec))]
 
     @property
     def generated_output_names(self):
-        """ Get the names of the outputs generated by the task.
-            If the spec doesn't have generated_output_names method,
-            it uses output_names.
-            The results depends on the input provided to the task
+        """Get the names of the outputs generated by the task.
+        If the spec doesn't have generated_output_names method,
+        it uses output_names.
+        The results depends on the input provided to the task
         """
         output_klass = make_klass(self.output_spec)
         if hasattr(output_klass, "generated_output_names"):
@@ -606,7 +606,7 @@ def get_input_el(self, ind):
         return None, inputs_dict
 
     def pickle_task(self):
-        """ Pickling the tasks with full inputs"""
+        """Pickling the tasks with full inputs"""
         pkl_files = self.cache_dir / "pkl_files"
         pkl_files.mkdir(exist_ok=True, parents=True)
         task_main_path = pkl_files / f"{self.name}_{self.uid}_task.pklz"
@@ -862,10 +862,10 @@ def graph_sorted(self):
 
     @property
     def checksum(self):
-        """ Calculates the unique checksum of the task.
-            Used to create specific directory name for task that are run;
-            and to create nodes checksums needed for graph checkums
-            (before the tasks have inputs etc.)
+        """Calculates the unique checksum of the task.
+        Used to create specific directory name for task that are run;
+        and to create nodes checksums needed for graph checkums
+        (before the tasks have inputs etc.)
         """
         # if checksum is called before run the _graph_checksums is not ready
         if is_workflow(self) and self.inputs._graph_checksums is attr.NOTHING:
@@ -884,8 +884,8 @@ def checksum(self):
         return self._checksum
 
     def _checksum_wf(self, input_hash, with_splitter=False):
-        """ creating hash value for workflows
-            includes connections and splitter if with_splitter is True
+        """creating hash value for workflows
+        includes connections and splitter if with_splitter is True
         """
         connection_hash = hash_function(self._connections)
         hash_list = [input_hash, connection_hash]
16 changes: 8 additions & 8 deletions pydra/engine/graph.py
@@ -99,9 +99,9 @@ def edges_names(self):
 
     @property
     def nodes_details(self):
-        """ dictionary with details of the nodes
-            for each task, there are inputs/outputs and connections
-            (with input/output fields names)
+        """dictionary with details of the nodes
+        for each task, there are inputs/outputs and connections
+        (with input/output fields names)
         """
         # removing repeated fields from inputs and outputs
         for el in self._nodes_details.values():
@@ -156,7 +156,7 @@ def add_edges(self, new_edges):
         self.sorting(presorted=self.sorted_nodes + [])
 
     def add_edges_description(self, new_edge_details):
-        """ adding detailed description of the connections, filling _nodes_details"""
+        """adding detailed description of the connections, filling _nodes_details"""
         in_nd, in_fld, out_nd, out_fld = new_edge_details
         for key in [in_nd, out_nd]:
             self._nodes_details.setdefault(
@@ -309,7 +309,7 @@ def _checking_successors_nodes(self, node, remove=True):
         return True
 
     def remove_successors_nodes(self, node):
-        """ Removing all the nodes that follow the node"""
+        """Removing all the nodes that follow the node"""
         self._successors_all = []
         self._checking_successors_nodes(node=node, remove=False)
         self.remove_nodes_connections(nodes=node)
@@ -352,7 +352,7 @@ def calculate_max_paths(self):
             self._checking_path(node_name=nm, first_name=nm)
 
     def create_dotfile_simple(self, outdir, name="graph"):
-        """ creates a simple dotfile (no nested structure)"""
+        """creates a simple dotfile (no nested structure)"""
        from .core import is_workflow
 
         dotstr = "digraph G {\n"
@@ -384,7 +384,7 @@ def create_dotfile_simple(self, outdir, name="graph"):
         return dotfile
 
     def create_dotfile_detailed(self, outdir, name="graph_det"):
-        """ creates a detailed dotfile (detailed connections - input/output fields,
+        """creates a detailed dotfile (detailed connections - input/output fields,
         but no nested structure)
         """
         dotstr = "digraph structs {\n"
@@ -492,7 +492,7 @@ def _create_dotfile_single_graph(self, nodes, edges):
         return dotstr
 
     def export_graph(self, dotfile, ext="png"):
-        """ exporting dotfile to other format, equires the dot command"""
+        """exporting dotfile to other format, equires the dot command"""
         available_ext = [
             "bmp",
             "canon",
26 changes: 13 additions & 13 deletions pydra/engine/helpers.py
@@ -159,7 +159,7 @@ def save(task_path: Path, result=None, task=None, name_prefix=None):
 
 
 def copyfile_workflow(wf_path, result):
-    """ if file in the wf results, the file will be copied to the workflow directory"""
+    """if file in the wf results, the file will be copied to the workflow directory"""
     for field in attr_fields(result.output):
         value = getattr(result.output, field.name)
         # if the field is a path or it can contain a path _copyfile_single_value is run
@@ -175,7 +175,7 @@ def copyfile_workflow(wf_path, result):
 
 
 def _copyfile_single_value(wf_path, value):
-    """ checking a single value for files that need to be copied to the wf dir"""
+    """checking a single value for files that need to be copied to the wf dir"""
     if isinstance(value, (tuple, list)):
         return [_copyfile_single_value(wf_path, val) for val in value]
     elif isinstance(value, dict):
@@ -369,7 +369,7 @@ def custom_validator(instance, attribute, value):
 
 
 def _type_validator(instance, attribute, value, tp, cont_type):
-    """ creating a customized type validator,
+    """creating a customized type validator,
     uses validator.deep_iterable/mapping if the field is a container
     (i.e. ty.List or ty.Dict),
     it also tries to guess when the value is a list due to the splitter
@@ -421,9 +421,9 @@ def _types_updates(tp_list, name):
 
 
 def _single_type_update(tp, name, simplify=False):
-    """ updating a single type with other related types - e.g. adding bytes for str
-        if simplify is True, than changing typing.List to list etc.
-        (assuming that I validate only one depth, so have to simplify at some point)
+    """updating a single type with other related types - e.g. adding bytes for str
+    if simplify is True, than changing typing.List to list etc.
+    (assuming that I validate only one depth, so have to simplify at some point)
     """
     if isinstance(tp, type) or tp in [File, Directory]:
         if tp is str:
@@ -456,7 +456,7 @@ def _single_type_update(tp, name, simplify=False):
 
 
 def _check_special_type(tp, name):
-    """checking if the type is a container: ty.List, ty.Dict or ty.Union """
+    """checking if the type is a container: ty.List, ty.Dict or ty.Union"""
     if sys.version_info.minor >= 8:
         return ty.get_origin(tp), ty.get_args(tp)
     else:
@@ -477,7 +477,7 @@ def _check_special_type(tp, name):
 
 
 def _allowed_values_validator(instance, attribute, value):
-    """ checking if the values is in allowed_values"""
+    """checking if the values is in allowed_values"""
     allowed = attribute.metadata["allowed_values"]
     if value is attr.NOTHING:
         pass
@@ -769,9 +769,9 @@ def load_and_run(
     task_pkl, ind=None, rerun=False, submitter=None, plugin=None, **kwargs
 ):
     """
-        loading a task from a pickle file, settings proper input
-        and running the task
-        """
+    loading a task from a pickle file, settings proper input
+    and running the task
+    """
     try:
         task = load_task(task_pkl=task_pkl, ind=ind)
     except Exception as excinfo:
@@ -813,7 +813,7 @@ async def load_and_run_async(task_pkl, ind=None, submitter=None, rerun=False, **
 
 
 def load_task(task_pkl, ind=None):
-    """ loading a task from a pickle file, settings proper input for the specific ind"""
+    """loading a task from a pickle file, settings proper input for the specific ind"""
     if isinstance(task_pkl, str):
         task_pkl = Path(task_pkl)
     task = cp.loads(task_pkl.read_bytes())
@@ -863,7 +863,7 @@ def position_sort(args):
 
 
 def argstr_formatting(argstr, inputs, value_updates=None):
-    """ formatting argstr that have form {field_name},
+    """formatting argstr that have form {field_name},
     using values from inputs and updating with value_update if provided
     """
     inputs_dict = attr.asdict(inputs)
2 changes: 1 addition & 1 deletion pydra/engine/helpers_file.py
@@ -741,7 +741,7 @@ def is_local_file(f):
 
 
 def is_existing_file(value):
-    """ checking if an object is an existing file"""
+    """checking if an object is an existing file"""
     if isinstance(value, str) and value == "":
         return False
     try:
16 changes: 8 additions & 8 deletions pydra/engine/helpers_state.py
@@ -307,7 +307,7 @@ def rpn2splitter(splitter_rpn):
 
 
 def add_name_combiner(combiner, name):
-    """ adding a node's name to each field from the combiner"""
+    """adding a node's name to each field from the combiner"""
     combiner_changed = []
     for comb in combiner:
         if "." not in comb:
@@ -318,7 +318,7 @@ def add_name_combiner(combiner, name):
 
 
 def add_name_splitter(splitter, name):
-    """ adding a node's name to each field from the splitter"""
+    """adding a node's name to each field from the splitter"""
     if isinstance(splitter, str):
         return _add_name([splitter], name)[0]
     elif isinstance(splitter, list):
@@ -329,7 +329,7 @@ def add_name_splitter(splitter, name):
 
 
 def _add_name(mlist, name):
-    """ adding anem to each element from the list"""
+    """adding anem to each element from the list"""
     for i, elem in enumerate(mlist):
         if isinstance(elem, str):
             if "." in elem or elem.startswith("_"):
@@ -371,7 +371,7 @@ def iter_splits(iterable, keys):
 
 
 def input_shape(inp, cont_dim=1):
-    """Get input shape, depends on the container dimension, if not specify it is assumed to be 1 """
+    """Get input shape, depends on the container dimension, if not specify it is assumed to be 1"""
     # TODO: have to be changed for inner splitter (sometimes different length)
     cont_dim -= 1
     shape = [len(inp)]
@@ -536,7 +536,7 @@ def splits(splitter_rpn, inputs, inner_inputs=None, cont_dim=None):
 def _single_op_splits(
     op_single, inputs, inner_inputs, previous_states_ind, cont_dim=None
 ):
-    """ splits function if splitter is a singleton"""
+    """splits function if splitter is a singleton"""
     if op_single.startswith("_"):
         return (previous_states_ind[op_single][0], previous_states_ind[op_single][1])
     if cont_dim is None:
@@ -562,8 +562,8 @@ def _single_op_splits(
 
 
 def splits_groups(splitter_rpn, combiner=None, inner_inputs=None):
-    """ splits inputs to groups (axes) and creates stacks for these groups
-        This is used to specify which input can be combined.
+    """splits inputs to groups (axes) and creates stacks for these groups
+    This is used to specify which input can be combined.
     """
     if not splitter_rpn:
         return [], {}, [], []
@@ -696,7 +696,7 @@ def splits_groups(splitter_rpn, combiner=None, inner_inputs=None):
 
 
 def _single_op_splits_groups(op_single, combiner, inner_inputs, groups):
-    """ splits_groups function if splitter is a singleton"""
+    """splits_groups function if splitter is a singleton"""
     if op_single in inner_inputs:
         # TODO: have to be changed if differ length
         # TODO: i think I don't want to add here from left part
10 changes: 5 additions & 5 deletions pydra/engine/specs.py
@@ -476,9 +476,9 @@ def collect_additional_outputs(self, inputs, output_dir, outputs):
         return additional_out
 
     def generated_output_names(self, inputs, output_dir):
-        """ Returns a list of all outputs that will be generated by the task.
-            Takes into account the task input and the requires list for the output fields.
-            TODO: should be in all Output specs?
+        """Returns a list of all outputs that will be generated by the task.
+        Takes into account the task input and the requires list for the output fields.
+        TODO: should be in all Output specs?
         """
         # checking the input (if all mandatory fields are provided, etc.)
         inputs.check_fields_input_spec()
@@ -575,8 +575,8 @@ def _field_metadata(self, fld, inputs, output_dir, outputs=None):
             raise Exception("(_field_metadata) is not a current valid metadata key.")
 
     def _check_requires(self, fld, inputs):
-        """ checking if all fields from the requires and template are set in the input
-            if requires is a list of list, checking if at least one list has all elements set
+        """checking if all fields from the requires and template are set in the input
+        if requires is a list of list, checking if at least one list has all elements set
         """
         from .helpers import ensure_list
 
