Skip to content

Commit efbfb86

Browse files
authored
Merge pull request #83 from neuroscout/multi-task
Allow multi task analyses in creation and querying, record estimator
2 parents a8462ea + 2efb377 commit efbfb86

17 files changed

+726
-1580
lines changed

pyns/models/analysis.py

Lines changed: 33 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ def clone(self, id):
160160
return self.post(id=id, sub_route='clone')
161161

162162
def create_analysis(self, *, name, dataset_name, predictor_names,
163-
task=None, subject=None, run=None, session=None,
163+
tasks=None, subjects=None, runs=None, session=None,
164164
hrf_variables=None, contrasts=None,
165165
dummy_contrasts=True, transformations=None, **kwargs):
166166
""" Analysis creation "wizard". Given run selection filters, and name
@@ -177,36 +177,38 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
177177
dataset = dataset[0]
178178

179179
# Get task name
180-
if task is not None:
181-
search = [t for t in dataset['tasks'] if t['name'] == task]
182-
if len(search) != 1:
183-
raise ValueError(
184-
"Task name does not match any tasks in the dataset")
185-
task_id = search[0]['id']
180+
if tasks is not None:
181+
if not isinstance(tasks, list):
182+
tasks = [tasks]
183+
184+
task_ids = []
185+
for task in tasks:
186+
search = [t for t in dataset['tasks'] if t['name'] == task]
187+
if len(search) != 1:
188+
raise ValueError(
189+
"Task name does not match any tasks in the dataset")
190+
task_ids.append(search[0]['id'])
186191
else:
187-
if len(dataset['tasks']) > 1:
188-
raise ValueError(
189-
"No task specified, but dataset has more than one task")
190-
res = dataset['tasks'][0]
191-
task = res['name']
192-
task_id = res['id']
192+
# All tasks
193+
tasks = [t['name'] for t in dataset['tasks']]
194+
task_ids = [t['id'] for t in dataset['tasks']]
193195

194196
# Get Run IDs
195197
run_models = self._client.runs.get(
196-
dataset_id=dataset['id'], task_id=task_id,
197-
subject=subject, number=run, session=session)
198+
dataset_id=dataset['id'], task_id=task_ids,
199+
subject=subjects, number=runs, session=session)
198200

199201
if len(run_models) < 1:
200202
raise ValueError("No runs could be found with the given criterion")
201203

202-
subject = list(set(r['subject'] for r in run_models))
203-
run = list(set(r['number'] for r in run_models if r['number']))
204-
run = run or None
204+
subjects = list(set(r['subject'] for r in run_models))
205+
runs = list(set(r['number'] for r in run_models if r['number']))
206+
runs = runs or None
205207

206-
run_id = [r['id'] for r in run_models]
208+
run_ids = [r['id'] for r in run_models]
207209
# Get Predictor IDs
208210
public_preds = self._client.predictors.get(
209-
run_id=run_id, name=predictor_names, active_only=False)
211+
run_id=run_ids, name=predictor_names, active_only=False)
210212

211213
predictors = [p['id'] for p in public_preds]
212214

@@ -218,7 +220,7 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
218220
for pred in private_preds:
219221
predictors += [p['id']
220222
for p in self._client.user.get_predictors(
221-
run_id=run_id, name=pred)]
223+
run_id=run_ids, name=pred)]
222224

223225
if len(predictors) != len(predictor_names):
224226
raise ValueError(
@@ -229,15 +231,15 @@ def create_analysis(self, *, name, dataset_name, predictor_names,
229231
if transformations:
230232
transformations = transformations.copy()
231233
model = build_model(
232-
name, predictor_names, task,
233-
subject=subject, run=run, session=session,
234+
name, predictor_names, tasks,
235+
subjects=subjects, runs=runs, session=session,
234236
hrf_variables=hrf_variables,
235237
transformations=transformations,
236238
contrasts=contrasts, dummy_contrasts=dummy_contrasts
237239
)
238240

239241
analysis = Analysis(analyses=self, dataset_id=dataset['id'],
240-
name=name, model=model, runs=run_id,
242+
name=name, model=model, runs=run_ids,
241243
predictors=predictors, **kwargs)
242244

243245
return analysis
@@ -316,7 +318,7 @@ def plot_report(self, id, run_id=None, plot_type='design_matrix_plot',
316318
def upload_neurovault(self, id, validation_hash, subject_paths=None,
317319
group_paths=None, collection_id=None, force=False,
318320
cli_version=None, fmriprep_version=None,
319-
n_subjects=None):
321+
estimator=None, n_subjects=None):
320322
""" Submit analysis for report generation
321323
:param str id: Analysis hash_id.
322324
:param str validation_hash: Validation hash string.
@@ -325,6 +327,7 @@ def upload_neurovault(self, id, validation_hash, subject_paths=None,
325327
:param bool force: Force upload with unique timestamped name.
326328
:param str cli_version: neuroscout-cli version at runtime
327329
:param str fmriprep_version: fmriprep version at runtime
330+
:param str estimator: estimator used in fitlins (afni/nilearn)
328331
:param int n_subjects: Number of subjects in analysis.
329332
:return: client response object
330333
"""
@@ -345,8 +348,9 @@ def _ts_first(paths):
345348
req = self.post(
346349
id=id, sub_route='upload', files=files, level='GROUP',
347350
validation_hash=validation_hash, force=force,
348-
fmriprep_version=fmriprep_version, cli_version=cli_version,
349-
n_subjects=n_subjects, collection_id=collection_id)
351+
fmriprep_version=fmriprep_version, estimator=estimator,
352+
cli_version=cli_version, n_subjects=n_subjects,
353+
collection_id=collection_id)
350354
if collection_id is None:
351355
collection_id = req['collection_id']
352356

@@ -357,8 +361,8 @@ def _ts_first(paths):
357361
req = self.post(
358362
id=id, sub_route='upload', files=files, level='SUBJECT',
359363
validation_hash=validation_hash, force=force,
360-
fmriprep_version=fmriprep_version, cli_version=cli_version,
361-
collection_id=collection_id)
364+
fmriprep_version=fmriprep_version, estimator=estimator,
365+
cli_version=cli_version, collection_id=collection_id)
362366
if collection_id is None:
363367
collection_id = req['collection_id']
364368

pyns/models/predictor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def create_collection(self, collection_name, dataset_id,
2828
descriptions=descriptions)
2929

3030
def get_collection(self, collection_id):
31-
return self.get('collection', collection_id=collection_id)
31+
return self.get(f'collection/{collection_id}')
3232

3333

3434
class PredictorEvents(Base):

pyns/models/utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ def attempt_to_import(dependency, name=None, fromlist=None):
1616
return mod
1717

1818

19-
def build_model(name, variables, task, subject, run=None, session=None,
19+
def build_model(name, variables, tasks, subjects, runs=None, session=None,
2020
hrf_variables=None, transformations=None,
2121
contrasts=None, dummy_contrasts=True):
2222
""" Builds a basic two level BIDS-Model """
@@ -46,8 +46,8 @@ def build_model(name, variables, task, subject, run=None, session=None,
4646
}
4747
],
4848
"Input": {
49-
"Subject": subject,
50-
"Task": task
49+
"Subject": subjects,
50+
"Task": tasks
5151
},
5252
"Name": name,
5353
}
@@ -72,8 +72,8 @@ def build_model(name, variables, task, subject, run=None, session=None,
7272
if dummy_contrasts == 'hrf' and hrf_variables:
7373
model['Steps'][0]['DummyContrasts']['Conditions'] = hrf_variables
7474

75-
if run is not None:
76-
model['Input']['Run'] = run
75+
if runs is not None:
76+
model['Input']['Run'] = runs
7777

7878
if session is not None:
7979
model['Input']['Session'] = session

tests/cassettes/analysis.json

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
{
22
"http_interactions": [
33
{
4-
"recorded_at": "2020-05-07T17:42:28",
4+
"recorded_at": "2020-12-17T21:53:03",
55
"request": {
66
"body": {
77
"encoding": "utf-8",
@@ -15,7 +15,7 @@
1515
"gzip, deflate"
1616
],
1717
"Authorization": [
18-
"JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1ODk0NzgxMzAsImlhdCI6MTU4ODg3MzMzMCwibmJmIjoxNTg4ODczMzMwLCJpZGVudGl0eSI6MX0.IMSWGp4eutrnISnNiDRSkqUUUrbKJQvB0wmYn63DSKU"
18+
"JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2MDg4NDY3NTYsImlhdCI6MTYwODI0MTk1NiwibmJmIjoxNjA4MjQxOTU2LCJpZGVudGl0eSI6MX0.IIKlNQgC2YlfKmqMDaNKtyKm2kjpWJZroMebZLXW4Ls"
1919
],
2020
"Connection": [
2121
"keep-alive"
@@ -27,15 +27,15 @@
2727
"application/json"
2828
],
2929
"User-Agent": [
30-
"python-requests/2.22.0"
30+
"python-requests/2.21.0"
3131
]
3232
},
3333
"method": "POST",
3434
"uri": "https://neuroscout.org/api/analyses"
3535
},
3636
"response": {
3737
"body": {
38-
"base64_string": "H4sIAAAAAAAAA31SwW7CMAy97zN8DlPpBoje0BjSpLED9DAJoSq0rsho0yhxkKqKf5/TwrQdWE/xS/38nl86SDeQxI8TAblFSVhkkiCBOIqjUTQZRbN0PEueQUAhSTqkTBWQzLlEl1tlSDUaEu2rSsBRumN/DYvGRFPuqZr8hAyUsnIooG4KrCDp4E0bT+Gw8dy9G+8FbP3hC3MGd2BVEYVvDIyn0p2YsVIlwkXAh6yRS9MSOsqkllXrlONRW0LjuLuDl0aTlY5CxQRLX9ftL6yDtDWBgwLfO56DpF6IgPVN4GfQ4QxifoQ9/5ZaqV3Z2FoGwz3zRXT3uVev68Vv+pu7f5v+CFoO64ZLGM+LU6W6n42+txUjLeohsyEjY7FQ+dUEwA/Q2GBqHM+jyT6A6sxvARKynnOzvrccP035zpEkH5qXm8UqZQbnD7Wi28sZxrD2U3ZVNUT38A3hP+mkagIAAA==",
38+
"base64_string": "H4sIAAAAAAAAA31STWvCQBC992fMOUKSYou5Sa1QqD1oDgWRMCYT3LrZLPshhuB/72xSiz3YnDJv9715b2Z7KA2ho6pABxmkcRpPknSSPOdpkk0hggodWnKFqCCbcUm2NEI70SrIlJcyggPaw3AM6pxO98yRbXkkBmqUliJo2ookZD28Ke1d+Fl7Zm+TXQQbv/+iksEtGFHF4UuA8RztkRWlqAkuEXxgQ1zqzpF1BSqUnRWWW20cacvsHl5a5QxaFyoWWPim6W6wHvJOBw0X9N7pFCwNRiJYXQ1+Bh9WE5UH2PG13KCydWsaDIEH5UvU39devq7mt/LXdP+S/hhajOOGS2jPgxO1uL8bdW8qGg2pcWfjjrShSpQ/IQB+gdaEUEk6i6e7AIoTvwXInPG8N+OHyOnjE59Zh84H8mI9X+asYP2+Ee76csY23pLhK3TGRksqhvLy8A3YCsTBZAIAAA==",
3939
"encoding": null,
4040
"string": ""
4141
},
@@ -53,7 +53,7 @@
5353
"application/json"
5454
],
5555
"Date": [
56-
"Thu, 07 May 2020 17:42:28 GMT"
56+
"Thu, 17 Dec 2020 21:53:03 GMT"
5757
],
5858
"Server": [
5959
"nginx/1.15.6"

0 commit comments

Comments
 (0)