Skip to content

Commit

Permalink
Merge pull request #83 from neuroscout/multi-task
Browse files Browse the repository at this point in the history
Allow multi task analyses in creation and querying, record estimator
  • Loading branch information
adelavega authored Jan 4, 2021
2 parents a8462ea + 2efb377 commit efbfb86
Show file tree
Hide file tree
Showing 17 changed files with 726 additions and 1,580 deletions.
62 changes: 33 additions & 29 deletions pyns/models/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ def clone(self, id):
return self.post(id=id, sub_route='clone')

def create_analysis(self, *, name, dataset_name, predictor_names,
task=None, subject=None, run=None, session=None,
tasks=None, subjects=None, runs=None, session=None,
hrf_variables=None, contrasts=None,
dummy_contrasts=True, transformations=None, **kwargs):
""" Analysis creation "wizard". Given run selection filters, and name
Expand All @@ -177,36 +177,38 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
dataset = dataset[0]

# Get task name
if task is not None:
search = [t for t in dataset['tasks'] if t['name'] == task]
if len(search) != 1:
raise ValueError(
"Task name does not match any tasks in the dataset")
task_id = search[0]['id']
if tasks is not None:
if not isinstance(tasks, list):
tasks = [tasks]

task_ids = []
for task in tasks:
search = [t for t in dataset['tasks'] if t['name'] == task]
if len(search) != 1:
raise ValueError(
"Task name does not match any tasks in the dataset")
task_ids.append(search[0]['id'])
else:
if len(dataset['tasks']) > 1:
raise ValueError(
"No task specified, but dataset has more than one task")
res = dataset['tasks'][0]
task = res['name']
task_id = res['id']
# All tasks
tasks = [t['name'] for t in dataset['tasks']]
task_ids = [t['id'] for t in dataset['tasks']]

# Get Run IDs
run_models = self._client.runs.get(
dataset_id=dataset['id'], task_id=task_id,
subject=subject, number=run, session=session)
dataset_id=dataset['id'], task_id=task_ids,
subject=subjects, number=runs, session=session)

if len(run_models) < 1:
raise ValueError("No runs could be found with the given criterion")

subject = list(set(r['subject'] for r in run_models))
run = list(set(r['number'] for r in run_models if r['number']))
run = run or None
subjects = list(set(r['subject'] for r in run_models))
runs = list(set(r['number'] for r in run_models if r['number']))
runs = runs or None

run_id = [r['id'] for r in run_models]
run_ids = [r['id'] for r in run_models]
# Get Predictor IDs
public_preds = self._client.predictors.get(
run_id=run_id, name=predictor_names, active_only=False)
run_id=run_ids, name=predictor_names, active_only=False)

predictors = [p['id'] for p in public_preds]

Expand All @@ -218,7 +220,7 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
for pred in private_preds:
predictors += [p['id']
for p in self._client.user.get_predictors(
run_id=run_id, name=pred)]
run_id=run_ids, name=pred)]

if len(predictors) != len(predictor_names):
raise ValueError(
Expand All @@ -229,15 +231,15 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
if transformations:
transformations = transformations.copy()
model = build_model(
name, predictor_names, task,
subject=subject, run=run, session=session,
name, predictor_names, tasks,
subjects=subjects, runs=runs, session=session,
hrf_variables=hrf_variables,
transformations=transformations,
contrasts=contrasts, dummy_contrasts=dummy_contrasts
)

analysis = Analysis(analyses=self, dataset_id=dataset['id'],
name=name, model=model, runs=run_id,
name=name, model=model, runs=run_ids,
predictors=predictors, **kwargs)

return analysis
Expand Down Expand Up @@ -316,7 +318,7 @@ def plot_report(self, id, run_id=None, plot_type='design_matrix_plot',
def upload_neurovault(self, id, validation_hash, subject_paths=None,
group_paths=None, collection_id=None, force=False,
cli_version=None, fmriprep_version=None,
n_subjects=None):
estimator=None, n_subjects=None):
""" Submit analysis for report generation
:param str id: Analysis hash_id.
:param str validation_hash: Validation hash string.
Expand All @@ -325,6 +327,7 @@ def upload_neurovault(self, id, validation_hash, subject_paths=None,
:param bool force: Force upload with unique timestamped name.
:param str cli_version: neuroscout-cli version at runtime
:param str fmriprep_version: fmriprep version at runtime
:param str estimator: estimator used in fitlins (afni/nilearn)
:param int n_subjects: Number of subjects in analysis.
:return: client response object
"""
Expand All @@ -345,8 +348,9 @@ def _ts_first(paths):
req = self.post(
id=id, sub_route='upload', files=files, level='GROUP',
validation_hash=validation_hash, force=force,
fmriprep_version=fmriprep_version, cli_version=cli_version,
n_subjects=n_subjects, collection_id=collection_id)
fmriprep_version=fmriprep_version, estimator=estimator,
cli_version=cli_version, n_subjects=n_subjects,
collection_id=collection_id)
if collection_id is None:
collection_id = req['collection_id']

Expand All @@ -357,8 +361,8 @@ def _ts_first(paths):
req = self.post(
id=id, sub_route='upload', files=files, level='SUBJECT',
validation_hash=validation_hash, force=force,
fmriprep_version=fmriprep_version, cli_version=cli_version,
collection_id=collection_id)
fmriprep_version=fmriprep_version, estimator=estimator,
cli_version=cli_version, collection_id=collection_id)
if collection_id is None:
collection_id = req['collection_id']

Expand Down
2 changes: 1 addition & 1 deletion pyns/models/predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def create_collection(self, collection_name, dataset_id,
descriptions=descriptions)

def get_collection(self, collection_id):
return self.get('collection', collection_id=collection_id)
return self.get(f'collection/{collection_id}')


class PredictorEvents(Base):
Expand Down
10 changes: 5 additions & 5 deletions pyns/models/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def attempt_to_import(dependency, name=None, fromlist=None):
return mod


def build_model(name, variables, task, subject, run=None, session=None,
def build_model(name, variables, tasks, subjects, runs=None, session=None,
hrf_variables=None, transformations=None,
contrasts=None, dummy_contrasts=True):
""" Builds a basic two level BIDS-Model """
Expand Down Expand Up @@ -46,8 +46,8 @@ def build_model(name, variables, task, subject, run=None, session=None,
}
],
"Input": {
"Subject": subject,
"Task": task
"Subject": subjects,
"Task": tasks
},
"Name": name,
}
Expand All @@ -72,8 +72,8 @@ def build_model(name, variables, task, subject, run=None, session=None,
if dummy_contrasts == 'hrf' and hrf_variables:
model['Steps'][0]['DummyContrasts']['Conditions'] = hrf_variables

if run is not None:
model['Input']['Run'] = run
if runs is not None:
model['Input']['Run'] = runs

if session is not None:
model['Input']['Session'] = session
Expand Down
10 changes: 5 additions & 5 deletions tests/cassettes/analysis.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"http_interactions": [
{
"recorded_at": "2020-05-07T17:42:28",
"recorded_at": "2020-12-17T21:53:03",
"request": {
"body": {
"encoding": "utf-8",
Expand All @@ -15,7 +15,7 @@
"gzip, deflate"
],
"Authorization": [
"JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1ODk0NzgxMzAsImlhdCI6MTU4ODg3MzMzMCwibmJmIjoxNTg4ODczMzMwLCJpZGVudGl0eSI6MX0.IMSWGp4eutrnISnNiDRSkqUUUrbKJQvB0wmYn63DSKU"
"JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MDg4NDY3NTYsImlhdCI6MTYwODI0MTk1NiwibmJmIjoxNjA4MjQxOTU2LCJpZGVudGl0eSI6MX0.IIKlNQgC2YlfKmqMDaNKtyKm2kjpWJZroMebZLXW4Ls"
],
"Connection": [
"keep-alive"
Expand All @@ -27,15 +27,15 @@
"application/json"
],
"User-Agent": [
"python-requests/2.22.0"
"python-requests/2.21.0"
]
},
"method": "POST",
"uri": "https://neuroscout.org/api/analyses"
},
"response": {
"body": {
"base64_string": "H4sIAAAAAAAAA31SwW7CMAy97zN8DlPpBoje0BjSpLED9DAJoSq0rsho0yhxkKqKf5/TwrQdWE/xS/38nl86SDeQxI8TAblFSVhkkiCBOIqjUTQZRbN0PEueQUAhSTqkTBWQzLlEl1tlSDUaEu2rSsBRumN/DYvGRFPuqZr8hAyUsnIooG4KrCDp4E0bT+Gw8dy9G+8FbP3hC3MGd2BVEYVvDIyn0p2YsVIlwkXAh6yRS9MSOsqkllXrlONRW0LjuLuDl0aTlY5CxQRLX9ftL6yDtDWBgwLfO56DpF6IgPVN4GfQ4QxifoQ9/5ZaqV3Z2FoGwz3zRXT3uVev68Vv+pu7f5v+CFoO64ZLGM+LU6W6n42+txUjLeohsyEjY7FQ+dUEwA/Q2GBqHM+jyT6A6sxvARKynnOzvrccP035zpEkH5qXm8UqZQbnD7Wi28sZxrD2U3ZVNUT38A3hP+mkagIAAA==",
"base64_string": "H4sIAAAAAAAAA31STWvCQBC992fMOUKSYou5Sa1QqD1oDgWRMCYT3LrZLPshhuB/72xSiz3YnDJv9715b2Z7KA2ho6pABxmkcRpPknSSPOdpkk0hggodWnKFqCCbcUm2NEI70SrIlJcyggPaw3AM6pxO98yRbXkkBmqUliJo2ookZD28Ke1d+Fl7Zm+TXQQbv/+iksEtGFHF4UuA8RztkRWlqAkuEXxgQ1zqzpF1BSqUnRWWW20cacvsHl5a5QxaFyoWWPim6W6wHvJOBw0X9N7pFCwNRiJYXQ1+Bh9WE5UH2PG13KCydWsaDIEH5UvU39devq7mt/LXdP+S/hhajOOGS2jPgxO1uL8bdW8qGg2pcWfjjrShSpQ/IQB+gdaEUEk6i6e7AIoTvwXInPG8N+OHyOnjE59Zh84H8mI9X+asYP2+Ee76csY23pLhK3TGRksqhvLy8A3YCsTBZAIAAA==",
"encoding": null,
"string": ""
},
Expand All @@ -53,7 +53,7 @@
"application/json"
],
"Date": [
"Thu, 07 May 2020 17:42:28 GMT"
"Thu, 17 Dec 2020 21:53:03 GMT"
],
"Server": [
"nginx/1.15.6"
Expand Down
Loading

0 comments on commit efbfb86

Please sign in to comment.