diff --git a/assets/imob-parse.py b/assets/imob-parse.py
index 572d4c0c..b919f0c0 100644
--- a/assets/imob-parse.py
+++ b/assets/imob-parse.py
@@ -5,6 +5,7 @@
 
 import numpy as np
 
+
 def return_timings(data, trial_type):
     """
     Finds all trials matching the string 'trial_type',
@@ -18,6 +19,7 @@ def return_timings(data, trial_type):
 
     return onsets
 
+
 def write_afni_timings(task, offset):
     """
     This takes the CSV files supplied with the imitate/observe
@@ -25,9 +27,10 @@ def write_afni_timings(task, offset):
     3dDeconvolve.
     """
     # import the original data as a list of lists of strings :D
-    data = np.genfromtxt(task + '-timing.csv', skip_header=1,
-                         delimiter=',',
-                         dtype=(str))
+    data = np.genfromtxt(task + '-timing.csv',
+                         skip_header=1,
+                         delimiter=',',
+                         dtype=(str))
 
     # find all trial types
     trials = []
@@ -41,20 +44,21 @@ def write_afni_timings(task, offset):
         onsets = return_timings(data, trial)
 
         # write an output file for this trial_type
-        f = open(task + '_event-times_' + trial + '.1D', 'wb')
-        
+        f = open(task + '_event-times_' + trial + '.1D', 'w')
+
         # this is for AFNI -- blank first line for first run if OB
         if task == 'OB':
            f.write('\n')
        for i in range(len(onsets)):
            on = float(onsets[i]) - offset
            f.write('{o:.2f} '.format(o=on))
-        
+
         # this is for AFNI -- blank second line for second run if IM
         if task == 'IM':
            f.write('\n')
-        
-        print('Finished ' + task + ':' + trial)
+
+        print('Finished ' + task + ':' + trial)
+
 
 def main():
     """
@@ -67,7 +71,7 @@ def main():
 
     print('Timing files generated. Please have a wonderful day.')
 
+
 if __name__ == '__main__':
     print('Should be run in the folder with the *-timing.csv files.')
     main()
-
diff --git a/bin/__init__.py b/bin/__init__.py
index 6e6145b5..2b5d0818 100644
--- a/bin/__init__.py
+++ b/bin/__init__.py
@@ -1 +1 @@
-## Needed for tests to work
\ No newline at end of file
+# Needed for tests to work
diff --git a/bin/archive_manifest.py b/bin/archive_manifest.py
index 67d854c7..7ab61c2c 100755
--- a/bin/archive_manifest.py
+++ b/bin/archive_manifest.py
@@ -14,14 +14,7 @@
     --showheaders            Just list all of the headers for each archive
 
 """
-import io
-import sys
-import os.path
-import tarfile
-import zipfile
-
 from docopt import docopt
-import pydicom
 import pandas as pd
 
 import datman
diff --git a/bin/dm_get_session_info.py b/bin/dm_get_session_info.py
index 9e3e84f0..d2791ccd 100755
--- a/bin/dm_get_session_info.py
+++ b/bin/dm_get_session_info.py
@@ -107,7 +107,7 @@ def main():
         results.append(result)
 
     if output_csv:
-        with open(output_csv, 'wb') as csvfile:
+        with open(output_csv, 'w') as csvfile:
             csv_writer = csv.writer(csvfile)
             csv_writer.writerow(headers)
             for row in results:
diff --git a/bin/dm_header_checks.py b/bin/dm_header_checks.py
index c9ff480a..fd30c20a 100755
--- a/bin/dm_header_checks.py
+++ b/bin/dm_header_checks.py
@@ -39,10 +39,10 @@ def main():
     dti = args['--dti']
 
     if ignore_file:
-        ignored_fields.extend(parse_file(ignore_file))
+        ignored_fields.extend(header_checks.parse_file(ignore_file))
 
     if tolerances:
-        tolerances = read_json(tolerances)
+        tolerances = header_checks.read_json(tolerances)
 
     diffs = header_checks.construct_diffs(series_json, standard_json,
                                           ignored_fields, tolerances, dti)
diff --git a/bin/dm_link_project_scans.py b/bin/dm_link_project_scans.py
index fefb4f7c..5cb908a3 100755
--- a/bin/dm_link_project_scans.py
+++ b/bin/dm_link_project_scans.py
@@ -40,7 +40,6 @@
 import csv
 import re
-import yaml
 
 from docopt import docopt
 
 import datman as dm
diff --git a/bin/dm_link_shared_ids.py b/bin/dm_link_shared_ids.py
index 297a88d7..66611dcf 100755
--- a/bin/dm_link_shared_ids.py
+++ b/bin/dm_link_shared_ids.py
@@ -17,10 +17,9 @@ that may have multiple IDs for some of its subjects.
 
 Options:
-    --redcap FILE        The path to a text file containing a redcap token to
+    --redcap FILE        A path to a text file containing a redcap token to
                          access 'Scan completed' surveys. If not set the
-                         'redcap-token' file in the project metadata folder
-                         will be used.
+                         environment variable 'REDCAP_TOKEN' will be used
     --site-config FILE   The path to a site configuration file. If not set,
                          the default defined for datman.config.config() is
                          used.
@@ -44,6 +43,7 @@
 
 import dm_link_project_scans as link_scans
 import datman.dashboard as dashboard
+from datman.exceptions import InputException
 
 DRYRUN = False
@@ -134,8 +134,11 @@ def get_project_redcap_records(config, redcap_cred):
 
 def get_redcap_token(config, redcap_cred):
     if not redcap_cred:
-        # Read in credentials as string from config file
-        redcap_cred = os.path.join(config.get_path('meta'), 'redcap-token')
+        token = os.getenv('REDCAP_TOKEN')
+        if not token:
+            raise InputException("Redcap token not provided. Set the shell "
+                                 "variable 'REDCAP_TOKEN' or provide a file")
+        return token
 
     try:
         token = datman.utils.read_credentials(redcap_cred)[0]
diff --git a/bin/dm_link_sprl.py b/bin/dm_link_sprl.py
index 3d0d1407..65c38057 100755
--- a/bin/dm_link_sprl.py
+++ b/bin/dm_link_sprl.py
@@ -25,16 +25,17 @@
 key contains SPRL
 
 """
-from docopt import docopt
 import sys
 import os
 import re
+import logging
+import errno
+
+from docopt import docopt
+
 import datman.config
 import datman.utils
 import datman.dashboard
-import logging
-import errno
-from datman.exceptions import DashboardException
 
 logger = logging.getLogger(__name__)
diff --git a/bin/dm_proc_outliers.py b/bin/dm_proc_outliers.py
index 53306fc1..3a9aa6bd 100755
--- a/bin/dm_proc_outliers.py
+++ b/bin/dm_proc_outliers.py
@@ -51,9 +51,10 @@
 "--write-stats " option is chosen, the summary statistics calculated from
 this csv are written out to the specified filename.
 """
-from docopt import docopt
-import numpy as np
 import os
+import sys
+
+from docopt import docopt
 import pandas as pd
 
 arguments = docopt(__doc__)
@@ -66,7 +67,7 @@
 DRYRUN = arguments['--dry-run']
 
 if not os.path.isfile(inputfile):
-    sys.exit("Input file {} doesn't exist.".format(FAmap))
+    sys.exit("Input file {} doesn't exist.".format(inputfile))
 
 # read the inputdata into a pandas dataframe
 inputdata = pd.read_csv(inputfile, sep=',', dtype=str, comment='#')
@@ -90,7 +91,7 @@
     if a file is specified, check that it exists and load it
     '''
     if not os.path.isfile(summaryin):
-        sys.exit("Summary Statistics file {} doesn't exist.".format(FAmap))
+        sys.exit("Summary Statistics file {} doesn't exist.".format(summaryin))
     SummaryStats = pd.read_csv(summaryin, sep=',', index_col=0)
 
 if 'AnyOutliers' not in inputdata.columns:
diff --git a/bin/dm_qc_report.py b/bin/dm_qc_report.py
index 88c58c1c..aa624a11 100755
--- a/bin/dm_qc_report.py
+++ b/bin/dm_qc_report.py
@@ -504,8 +504,8 @@ def get_series_to_add(series, subject):
             t2 = get_split_image(subject, series.series_num, 'T2')
             split_series = [t2]
         except RuntimeError as e:
-            logger.error("Can't add PDT2 {} to QC page. Reason: {}".format(
-                series.path, e.message))
+            logger.error("Can't add PDT2 {} to QC page. Reason: {}"
+                         "".format(series.path, e))
             return []
 
     try:
diff --git a/bin/dm_sftp.py b/bin/dm_sftp.py
index d3bce7cc..b7db6664 100755
--- a/bin/dm_sftp.py
+++ b/bin/dm_sftp.py
@@ -122,7 +122,7 @@ def get_server_config(cfg):
         try:
             server_config[site_server] = read_config(cfg, site=site)
         except datman.config.UndefinedSetting as e:
-            logger.debug(e.message)
+            logger.debug(e)
 
     return server_config
 
diff --git a/bin/dm_symlink_scans.py b/bin/dm_symlink_scans.py
index e5676cfb..8b11e13d 100755
--- a/bin/dm_symlink_scans.py
+++ b/bin/dm_symlink_scans.py
@@ -23,7 +23,6 @@
 
 import os
 import sys
-from glob import glob
 import fnmatch
 import logging
 
diff --git a/bin/dm_update_study_status.py b/bin/dm_update_study_status.py
index cae97013..d0f30a68 100644
--- a/bin/dm_update_study_status.py
+++ b/bin/dm_update_study_status.py
@@ -3,6 +3,7 @@
 import datman.dashboard as dashboard
 import datman.config as cfg
 
+
 def main():
     config = cfg.config()
     studies = config.get_key('Projects').keys()
@@ -10,7 +11,7 @@ def main():
     for study in studies:
         try:
             config.set_study(study)
-        except:
+        except Exception:
             pass
 
         is_open = config.get_key('IsOpen')
diff --git a/bin/dm_xnat_upload.py b/bin/dm_xnat_upload.py
index 12a9706d..a24ac697 100755
--- a/bin/dm_xnat_upload.py
+++ b/bin/dm_xnat_upload.py
@@ -134,7 +134,7 @@ def process_archive(archivefile):
     try:
         data_exists, resource_exists = check_files_exist(archivefile,
                                                          xnat_session)
-    except Exception as e:
+    except Exception:
         logger.error('Failed checking xnat for session: {}'.format(scanid))
         return
 
@@ -272,7 +272,7 @@ def check_files_exist(archive, xnat_session):
     try:
         scans_exist = scan_data_exists(xnat_session, local_headers)
     except ValueError as e:
-        logger.error("Please check {}: {}".format(archive, e.message))
+        logger.error("Please check {}: {}".format(archive, e))
         # Return true for both to prevent XNAT being modified
         return True, True
 
diff --git a/bin/redcap_demographics.py b/bin/redcap_demographics.py
index 7151beb9..68e67140 100755
--- a/bin/redcap_demographics.py
+++ b/bin/redcap_demographics.py
@@ -10,8 +10,8 @@
                  Name of the datman managed study
 
 Options:
-    --URL PATH       set the REDCap URL [default:
-                     https://redcap.smh.ca/redcap/api/]
+    --URL PATH       set the REDCap URL
+                     [default: https://redcap.smh.ca/redcap/api/]
     --output PATH    set the location to save the output csv file
                      [default: clinical/demographics.csv]
 
@@ -81,7 +81,7 @@ def get_payload(token):
 
 def make_rest(url, payload, REDCap_variables):
     response = post(url, data=payload)
-    if response.status_code is not 200:
+    if response.status_code != 200:
         print('Cannot talk to server, response code is {}.'
               ''.format(response.status_code))
         sys.exit(1)
diff --git a/bin/track_scan_dates.py b/bin/track_scan_dates.py
index db03caf6..8c48894c 100644
--- a/bin/track_scan_dates.py
+++ b/bin/track_scan_dates.py
@@ -1,4 +1,3 @@
-import json
 import operator
 import os
 from datetime import datetime
diff --git a/bin/xnat_fetch_sessions.py b/bin/xnat_fetch_sessions.py
index 7f23bddb..6c1a5955 100755
--- a/bin/xnat_fetch_sessions.py
+++ b/bin/xnat_fetch_sessions.py
@@ -101,7 +101,7 @@ def main():
             credentials_file, server, project, destination = get_xnat_config(
                 config, site)
         except KeyError as e:
-            logger.error("{}".format(e.message))
+            logger.error("{}".format(e))
             continue
         username, password = get_credentials(credentials_file)
         with datman.xnat.xnat(server, username, password) as xnat:
@@ -118,7 +118,7 @@ def get_sessions(xnat, xnat_project, destination):
             session = xnat.get_session(xnat_project, session_name)
         except Exception as e:
             logger.error("Failed to get session {} from xnat. "
-                         "Reason: {}".format(session_name, e.message))
+                         "Reason: {}".format(session_name, e))
             continue
 
         zip_name = session_name.upper() + ".zip"
@@ -139,8 +139,8 @@ def get_sessions(xnat, xnat_project, destination):
             try:
                 temp_zip = session.download(xnat, temp, zip_name=zip_name)
             except Exception as e:
-                logger.error("Cant download session {}. Reason: {}".format(
-                    session_name, e.message))
+                logger.error("Cant download session {}. Reason: {}"
+                             "".format(session_name, e))
                 continue
 
             restructure_zip(temp_zip, zip_path)
@@ -316,13 +316,13 @@ def remove_empty_dirs(base_dir):
         try:
             shutil.rmtree(empty_dir)
         except OSError as e:
-            logger.info("Cant delete {}. Reason: {}".format(empty_dir, e.message))
+            logger.info("Cant delete {}. Reason: {}".format(empty_dir, e))
 
 
 def move(source, dest):
     try:
         shutil.move(source, dest)
-    except Exception as e:
+    except Exception:
         logger.error("Couldnt move {} to destination {}".format(source, dest))
 
diff --git a/datman/__init__.py b/datman/__init__.py
index 172a9f5a..e242699e 100644
--- a/datman/__init__.py
+++ b/datman/__init__.py
@@ -6,4 +6,4 @@
 # datman/dashboard.py you will get circular import errors. I am sooo sorry.
 # We'd have to store hardcoded file paths in the dashboard database to
 # otherwise fix this.
-import datman.config
+import datman.config  # noqa: F401
diff --git a/datman/config.py b/datman/config.py
index 903818bc..0ebad46c 100644
--- a/datman/config.py
+++ b/datman/config.py
@@ -470,7 +470,7 @@ def get_study_tags(self, study=None):
             except KeyError:
                 continue
 
-            if type(site_tags) is str:
+            if isinstance(site_tags, str):
                 site_tags = [site_tags]
 
             for tag_name in site_tags:
@@ -517,7 +517,7 @@ def series_map(self):
             except KeyError:
                 raise KeyError("Cant retrieve 'Pattern' from config. Did you "
                                "specify a site?")
-            if type(pattern) is list:
+            if isinstance(pattern, list):
                 pattern = "|".join(pattern)
             series_map[tag] = pattern
         return series_map
diff --git a/datman/dashboard.py b/datman/dashboard.py
index 2f453f27..d144239a 100644
--- a/datman/dashboard.py
+++ b/datman/dashboard.py
@@ -256,7 +256,7 @@ def get_bids_scan(name):
     scan = queries.get_scan(name, bids=True)
     if len(scan) > 1:
         raise DashboardException("Couldnt identify scan {}. {} matches "
-                                 "found".format(scan_name, len(scan)))
+                                 "found".format(name, len(scan)))
     if len(scan) == 1:
         return scan[0]
     return None
diff --git a/datman/exceptions.py b/datman/exceptions.py
index fad400c5..fe994926 100644
--- a/datman/exceptions.py
+++ b/datman/exceptions.py
@@ -23,3 +23,7 @@ class MetadataException(Exception):
     pass
 
 
 class ExportException(Exception):
     pass
+
+
+class InputException(Exception):
+    pass
diff --git a/datman/fs_log_scraper.py b/datman/fs_log_scraper.py
index 2af6fa61..d96640d1 100755
--- a/datman/fs_log_scraper.py
+++ b/datman/fs_log_scraper.py
@@ -7,16 +7,11 @@
 The FSLog class aggregates/parses the most useful details from the log files.
 """
 import os
-import sys
 import glob
 import re
 import datetime
 import logging
 
-from docopt import docopt
-
-import datman.config
-
 logger = logging.getLogger(os.path.basename(__file__))
 
@@ -228,7 +223,3 @@ def get_niftis(cmd_args):
     niftis = [item.strip('-i').strip('-T2').strip() for item in nifti_inputs]
 
     return '; '.join(niftis)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/datman/scan_list.py b/datman/scan_list.py
index f7fe9911..e3ac6bbf 100755
--- a/datman/scan_list.py
+++ b/datman/scan_list.py
@@ -57,7 +57,7 @@ def generate_scan_list(scan_entry_class, zip_files, dest_dir):
         processed_scans = get_scan_list_contents(output)
     except Exception as e:
         raise RuntimeError("Can't read scan entries from existing scans.csv "
-                           "file. Reason: {}".format(e.message))
+                           "file. Reason: {}".format(e))
 
     new_entries = make_new_entries(processed_scans, zip_files,
                                    scan_entry_class)
@@ -70,7 +70,7 @@ def generate_scan_list(scan_entry_class, zip_files, dest_dir):
 
 
 def start_new_scan_list(output):
     logger.info("Starting new scans.csv file at {}".format(output))
-    with open(output, 'wb') as out:
+    with open(output, 'w') as out:
         out.write('source_name\ttarget_name\tPatientName\tStudyID\n')
@@ -109,7 +109,7 @@ def make_new_entries(processed_scans, zip_files, EntryClass):
         except Exception as e:
             logger.error("Cant make an entry for {}. Reason: {}".format(
                 zip_file,
-                e.message))
+                e))
             continue
         new_entries.append(str(entry))
 
@@ -118,7 +118,7 @@ def make_new_entries(processed_scans, zip_files, EntryClass):
 
 
 def update_scans_csv(output, new_entries):
-    with open(output, 'ab') as scan_csv:
+    with open(output, 'a') as scan_csv:
         scan_csv.writelines(new_entries)
 
diff --git a/datman/xnat.py b/datman/xnat.py
index 6bdb6ff7..f7c42b4a 100644
--- a/datman/xnat.py
+++ b/datman/xnat.py
@@ -467,7 +467,7 @@ def put_dicoms(self, project, session, experiment, filename, retries=3):
                                           subject=session,
                                           session=experiment)
         try:
-            with open(filename) as data:
+            with open(filename, 'rb') as data:
                 self._make_xnat_post(upload_url, data, retries, headers)
         except XnatException as e:
             e.study = project
@@ -687,7 +687,7 @@ def _get_xnat_stream(self, url, filename, retries=3, timeout=120):
                                 .format(response.status_code))
             response.raise_for_status()
 
-        with open(filename, 'wb') as f:
+        with open(filename, 'w') as f:
             try:
                 for chunk in response.iter_content(1024):
                     f.write(chunk)
diff --git a/tests/test_config.py b/tests/test_config.py
index 912c16f2..345df87d 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -4,10 +4,6 @@
 """
 import os
-import unittest
-
-import nose.tools
-from nose.tools import raises
 
 import datman.config as config
 
@@ -17,4 +13,4 @@ def test_initialise_from_environ():
     os.environ['DM_CONFIG'] = os.path.join(FIXTURE_DIR, 'site_config.yml')
     os.environ['DM_SYSTEM'] = 'test'
 
-    cfg = config.config()
+    config.config()
diff --git a/tests/test_dm_link_project_scans.py b/tests/test_dm_link_project_scans.py
index 0c101938..7aff12e4 100644
--- a/tests/test_dm_link_project_scans.py
+++ b/tests/test_dm_link_project_scans.py
@@ -1,9 +1,8 @@
-import os
 import unittest
 import logging
 import importlib
 
-from mock import patch, mock_open
+from mock import patch
 
 import datman.scanid
diff --git a/tests/test_dm_link_shared_ids.py b/tests/test_dm_link_shared_ids.py
index e6a6d621..e9406a06 100644
--- a/tests/test_dm_link_shared_ids.py
+++ b/tests/test_dm_link_shared_ids.py
@@ -1,12 +1,8 @@
-import os
 import importlib
 import unittest
 import copy
 import logging
 
-import datman
-import datman.config
-
 # Disable all logging output for tests
 logging.disable(logging.CRITICAL)
diff --git a/tests/test_dm_qc_report.py b/tests/test_dm_qc_report.py
index 62681e12..07ea651d 100644
--- a/tests/test_dm_qc_report.py
+++ b/tests/test_dm_qc_report.py
@@ -6,7 +6,7 @@
 from random import randint
 
 import nose.tools
-from mock import patch, mock_open, call, MagicMock
+from mock import patch, call, MagicMock
 
 import datman.config as cfg
 import datman.scan
@@ -32,7 +32,7 @@ class GetConfig(unittest.TestCase):
 
     @nose.tools.raises(SystemExit)
     def test_exits_gracefully_with_bad_study(self):
-        config = qc.get_config(study="madeupcode")
+        qc.get_config(study="madeupcode")
 
     @nose.tools.raises(SystemExit)
     @patch('datman.config.config')
@@ -41,7 +41,7 @@ def test_exits_gracefully_when_paths_missing_from_config(self,
         mock_config.return_value.get_path.side_effect = lambda path: \
             {'dcm': '', 'nii': ''}[path]
 
-        config = qc.get_config("STUDY")
+        qc.get_config("STUDY")
 
 
 class VerifyInputPaths(unittest.TestCase):
@@ -77,7 +77,7 @@ def test_checks_input_paths(self, mock_utils, mock_verify):
     def test_makes_qc_folder_if_doesnt_exist(self, mock_create, mock_verify,
                                              mock_remove):
         assert mock_create.call_count == 0
 
-        scan = qc.prepare_scan("STUDY_SITE_ID_01", config)
+        qc.prepare_scan("STUDY_SITE_ID_01", config)
 
         assert mock_create.call_count == 1
diff --git a/tests/test_dm_xnat_upload.py b/tests/test_dm_xnat_upload.py
index c704ae3a..04d5230e 100644
--- a/tests/test_dm_xnat_upload.py
+++ b/tests/test_dm_xnat_upload.py
@@ -1,8 +1,6 @@
-import os
 import unittest
 import importlib
 import logging
-import zipfile
 
 from nose.tools import raises
 from mock import patch, MagicMock
@@ -49,8 +47,7 @@ def test_raises_exception_if_scan_uids_mismatch(self, mock_headers,
         xnat_session = self.__get_xnat_session(self.session)
 
         # Run
-        files_exist = upload.check_files_exist(self.archive, xnat_session,
-                                               self.ident)
+        upload.check_files_exist(self.archive, xnat_session, self.ident)
 
         # Should raise an exception, so assertion is never reached
         assert False
diff --git a/tests/test_fs_log_scraper.py b/tests/test_fs_log_scraper.py
index aed74938..ed289472 100644
--- a/tests/test_fs_log_scraper.py
+++ b/tests/test_fs_log_scraper.py
@@ -167,7 +167,7 @@ def __init__(self, subid, status):
         subject1 = LogStub('SUBJECT1', 'Exited with error')
         subject2 = LogStub('SUBJECT2', 'Still Running')
 
-        standard_sub = scraper.choose_standard_sub([subject1, subject2])
+        scraper.choose_standard_sub([subject1, subject2])
 
 
 class TestVerifyStandards(unittest.TestCase):
diff --git a/tests/test_scan.py b/tests/test_scan.py
index 7ca155b7..f1a32621 100644
--- a/tests/test_scan.py
+++ b/tests/test_scan.py
@@ -44,7 +44,7 @@ class TestScan(unittest.TestCase):
 
     @raises(datman.scanid.ParseException)
     def test_raises_parse_exception_with_bad_subject_id(self):
-        subject = datman.scan.Scan(self.bad_name, self.config)
+        datman.scan.Scan(self.bad_name, self.config)
 
     def test_makes_scan_instance_for_id_without_session(self):
         subject = datman.scan.Scan(self.good_name, self.config)
@@ -122,7 +122,7 @@ def test_subject_series_with_nondatman_name_causes_parse_exception(
         nii_list = [well_named, badly_named1, badly_named2]
         mock_glob.return_value = nii_list
 
-        subject = datman.scan.Scan(self.good_name, self.config)
+        datman.scan.Scan(self.good_name, self.config)
 
     @patch('glob.glob')
     def test_dicoms_lists_only_dicom_files(self, mock_glob):
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 57da1502..28d4b051 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 
-import os
 import unittest
 import logging
 
diff --git a/tests/test_xnat.py b/tests/test_xnat.py
index f78135d2..e33602c0 100644
--- a/tests/test_xnat.py
+++ b/tests/test_xnat.py
@@ -175,12 +175,12 @@ def test_asks_user_to_enter_password_if_username_provided(self, mock_pass):
 
     @raises(KeyError)
     def test_raises_KeyError_if_username_not_given_and_not_set_in_env(self):
-        with patch.dict(os.environ, {}, clear=True) as mock_env:
+        with patch.dict(os.environ, {}, clear=True):
             datman.xnat.get_auth()
 
     @raises(KeyError)
     def test_raises_KeyError_if_username_found_in_env_and_password_not_set(
             self):
         env = {'XNAT_USER': 'someuser'}
-        with patch.dict('os.environ', env, clear=True) as mock_env:
+        with patch.dict('os.environ', env, clear=True):
             datman.xnat.get_auth()