9 | 9 | from nipype import Workflow, Node, MapNode |
10 | 10 | from nipype.algorithms.modelgen import SpecifyModel |
11 | 11 | from nipype.interfaces.utility import IdentityInterface, Function, Split, Merge |
12 | | -from nipype.interfaces.io import SelectFiles, DataSink |
| 12 | +from nipype.interfaces.io import SelectFiles, DataSink, DataGrabber |
13 | 13 | from nipype.interfaces.fsl.aroma import ICA_AROMA |
14 | 14 | from nipype.interfaces.fsl.preprocess import SUSAN, BET |
15 | 15 | from nipype.interfaces.fsl.model import ( |
@@ -180,22 +180,25 @@ def get_preprocessing(self): |
180 | 180 | mean_func_2.inputs.op_string = '-Tmean' |
181 | 181 | preprocessing.connect(normalize_intensity, 'out_file', mean_func_2, 'in_file') |
182 | 182 |
183 | | - # ImageMaths - Perform temporal highpass filtering on the data |
184 | | - def get_high_pass_filter_command(in_file): |
| 183 | + # Function get_high_pass_filter_command - Build command line for temporal highpass filter |
| 184 | + def get_high_pass_filter_command(in_file, repetition_time, high_pass_filter_cutoff): |
185 | 185 | """ Create command line for high pass filtering using image maths """ |
186 | | - from narps_open import TaskInformation |
187 | | - |
188 | | - high_pass_filter_cutoff = 100 #seconds |
189 | | - repetition_time = float(TaskInformation()['RepetitionTime']) |
190 | | - |
191 | 186 | return f'-bptf {high_pass_filter_cutoff / (2.0 * repetition_time)} -1 -add {in_file}' |
192 | 187 |
| 188 | + high_pass_command = Node(Function( |
| 189 | + function = get_high_pass_filter_command, |
| 190 | + input_names = ['in_file', 'repetition_time', 'high_pass_filter_cutoff'], |
| 191 | + output_names = ['command'] |
| 192 | + ), name = 'high_pass_command') |
| 193 | + high_pass_command.inputs.high_pass_filter_cutoff = 100.0 #seconds |
| 194 | + high_pass_command.inputs.repetition_time = TaskInformation()['RepetitionTime'] |
| 195 | + preprocessing.connect(mean_func_2, 'out_file', high_pass_command, 'in_file') |
| 196 | + |
| 197 | + # ImageMaths - Perform temporal highpass filtering on the data |
193 | 198 | high_pass_filter = Node(ImageMaths(), name = 'high_pass_filter') |
194 | 199 | high_pass_filter.inputs.suffix = '_tempfilt' |
195 | 200 | preprocessing.connect(normalize_intensity, 'out_file', high_pass_filter, 'in_file') |
196 | | - preprocessing.connect( |
197 | | - mean_func_2, ('out_file', get_high_pass_filter_command), |
198 | | - high_pass_filter, 'op_string') |
| 201 | + preprocessing.connect(high_pass_command, 'command', high_pass_filter, 'op_string') |
199 | 202 |
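For reference, a minimal standalone sketch of the op_string computation the new high_pass_command node performs. FSL's fslmaths -bptf expects the highpass sigma expressed in volumes, hence the division by 2 * TR, and the mean image is added back because -bptf removes the temporal mean. The 1.0 s repetition time and the file name below are assumed example values.

def get_high_pass_filter_command(in_file, repetition_time, high_pass_filter_cutoff):
    """ Create command line for high pass filtering using image maths """
    # Convert the cutoff (seconds) into a sigma expressed in volumes for -bptf
    highpass_sigma = high_pass_filter_cutoff / (2.0 * repetition_time)
    # -add puts the temporal mean (in_file) back, since -bptf demeans the data
    return f'-bptf {highpass_sigma} -1 -add {in_file}'

# Assumed example values: 100 s cutoff with a 1.0 s TR gives a sigma of 50 volumes
print(get_high_pass_filter_command('mean_func.nii.gz', 1.0, 100.0))
# prints: -bptf 50.0 -1 -add mean_func.nii.gz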
200 | 203 | # DataSink Node - store the wanted results in the wanted repository |
201 | 204 | data_sink = Node(DataSink(), name = 'data_sink') |
@@ -240,7 +243,7 @@ def get_preprocessing_outputs(self): |
240 | 243 | template = join( |
241 | 244 | self.directories.output_dir, 'preprocessing', |
242 | 245 | '_run_id_{run_id}_subject_id_{subject_id}', |
243 | | - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_space-MNI152NLin2009cAsym_preproc_dtype_thresh_smooth_intnorm.nii.gz') |
| 246 | + 'denoised_func_data_nonaggr_brain_smooth_intnorm_tempfilt.nii.gz') |
244 | 247 |
245 | 248 | return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ |
246 | 249 | for parameter_values in parameter_sets] |
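As a side note, a minimal sketch of how this template expansion behaves, with assumed subject and run lists and a shortened output directory:

from itertools import product
from os.path import join

# Assumed example parameter lists
parameters = {'run_id': ['01', '02'], 'subject_id': ['001']}
parameter_sets = product(*parameters.values())
template = join(
    'output_dir', 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}',
    'denoised_func_data_nonaggr_brain_smooth_intnorm_tempfilt.nii.gz')

# Each parameter combination is zipped back with the parameter names, then formatted
print([template.format(**dict(zip(parameters.keys(), parameter_values)))
    for parameter_values in parameter_sets])
# prints one path per run: .../_run_id_01_subject_id_001/... and .../_run_id_02_subject_id_001/...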
@@ -329,7 +332,7 @@ def get_parameters_file(in_file): |
329 | 332 | from pandas import read_csv, DataFrame |
330 | 333 | from numpy import array, transpose |
331 | 334 |
332 | | - data_frame = read_csv(filepath, sep = '\t', header=0) |
| 335 | + data_frame = read_csv(in_file, sep = '\t', header=0) |
333 | 336 | if 'NonSteadyStateOutlier00' in data_frame.columns: |
334 | 337 | temp_list = array([ |
335 | 338 | data_frame['X'], data_frame['Y'], data_frame['Z'], |
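For clarity, a self-contained sketch of the confounds selection this method performs. Only the translation columns are visible in this hunk, so the full column set, the helper name, and the output handling are assumptions (fMRIPrep confounds files also carry RotX, RotY, RotZ).

from numpy import array, transpose
from pandas import DataFrame, read_csv

def select_motion_parameters(in_file, out_file):
    """ Keep the six realignment columns of an fMRIPrep confounds TSV
        and write them as a tab-separated regressors file (assumed helper). """
    data_frame = read_csv(in_file, sep='\t', header=0)
    columns = ['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ']  # assumed column set
    retained = transpose(array([data_frame[column] for column in columns]))
    DataFrame(retained).to_csv(out_file, sep='\t', header=False, index=False)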
@@ -379,7 +382,7 @@ def get_run_level_analysis(self): |
379 | 382 | # Preprocessed functional MRI |
380 | 383 | 'func' : join(self.directories.output_dir, 'preprocessing', |
381 | 384 | '_run_id_{run_id}_subject_id_{subject_id}', |
382 | | - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_space-MNI152NLin2009cAsym_preproc_dtype_thresh_smooth_intnorm.nii.gz' |
| 385 | + 'denoised_func_data_nonaggr_brain_smooth_intnorm_tempfilt.nii.gz' |
383 | 386 | ), |
384 | 387 | # Event file |
385 | 388 | 'events' : join('sub-{subject_id}', 'func', |
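A brief sketch of how SelectFiles resolves templates like the one updated above; the base directory and the example ids are assumptions, and in the pipeline the placeholders are normally fed from an IdentityInterface node rather than set directly:

from os.path import join
from nipype import Node
from nipype.interfaces.io import SelectFiles

templates = {
    'func': join('preprocessing', '_run_id_{run_id}_subject_id_{subject_id}',
        'denoised_func_data_nonaggr_brain_smooth_intnorm_tempfilt.nii.gz')
}
select_files = Node(SelectFiles(templates), name='select_files')
select_files.inputs.base_directory = '/output'  # assumed output root
# The {run_id} / {subject_id} template fields become input traits of the node
select_files.inputs.subject_id = '001'
select_files.inputs.run_id = '01'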
@@ -429,14 +432,14 @@ def get_run_level_analysis(self): |
429 | 432 | # FEATModel Node - Generate run level model |
430 | 433 | model_generation = Node(FEATModel(), name = 'model_generation') |
431 | 434 | run_level_analysis.connect(model_design, 'ev_files', model_generation, 'ev_files') |
432 | | - run_level_analysis.connect(model_design, 'fsf_files', model_generation, 'fsf_files') |
| 435 | + run_level_analysis.connect(model_design, 'fsf_files', model_generation, 'fsf_file') |
433 | 436 |
434 | 437 | # FILMGLS Node - Estimate first level model |
435 | 438 | model_estimate = Node(FILMGLS(), name = 'model_estimate') |
436 | 439 | model_estimate.inputs.smooth_autocorr = True |
437 | 440 | model_estimate.inputs.mask_size = 5 |
438 | 441 | model_estimate.inputs.threshold = 1000 |
439 | | - run_level_analysis.connect(smoothing_func, 'out_file', model_estimate, 'in_file') |
| 442 | + run_level_analysis.connect(select_files, 'func', model_estimate, 'in_file') |
440 | 443 | run_level_analysis.connect(model_generation, 'con_file', model_estimate, 'tcon_file') |
441 | 444 | run_level_analysis.connect(model_generation, 'design_file', model_estimate, 'design_file') |
442 | 445 |
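For context, a minimal sketch of the corrected wiring: nipype's FEATModel interface takes a single fsf_file (hence the rename above), and FILMGLS estimates the model from the preprocessed functional image plus the design and contrast files. The workflow name and working directory are placeholders.

from nipype import Node, Workflow
from nipype.interfaces.fsl.model import FEATModel, FILMGLS

run_level = Workflow(base_dir='/tmp/work', name='film_sketch')  # assumed names

model_generation = Node(FEATModel(), name='model_generation')
model_estimate = Node(FILMGLS(), name='model_estimate')
model_estimate.inputs.smooth_autocorr = True
model_estimate.inputs.mask_size = 5
model_estimate.inputs.threshold = 1000

# FEATModel outputs the design matrix and contrast files consumed by FILMGLS;
# the 4D functional image still has to be connected to 'in_file'
run_level.connect(model_generation, 'design_file', model_estimate, 'design_file')
run_level.connect(model_generation, 'con_file', model_estimate, 'tcon_file')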
@@ -500,6 +503,12 @@ def get_subject_level_analysis(self): |
500 | 503 | Returns: |
501 | 504 | - subject_level_analysis : nipype.WorkFlow |
502 | 505 | """ |
| 506 | + # Create subject level analysis workflow and connect its nodes |
| 507 | + subject_level = Workflow( |
| 508 | + base_dir = self.directories.working_dir, |
| 509 | + name = 'subject_level_analysis' |
| 510 | + ) |
| 511 | + |
503 | 512 | # Infosource Node - To iterate on subject and runs |
504 | 513 | information_source = Node(IdentityInterface( |
505 | 514 | fields = ['subject_id', 'contrast_id']), |
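A short sketch of the pattern introduced here: the Workflow is created first, then an IdentityInterface node iterates over subject and contrast ids so every downstream node runs once per combination. The id lists below are assumed example values.

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

subject_level = Workflow(base_dir='/tmp/work', name='subject_level_analysis')

information_source = Node(
    IdentityInterface(fields=['subject_id', 'contrast_id']),
    name='information_source')
# iterables expand the workflow into one branch per (subject_id, contrast_id) pair
information_source.iterables = [
    ('subject_id', ['001', '002']),  # assumed example ids
    ('contrast_id', ['1', '2'])
]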
@@ -612,29 +621,34 @@ def get_subject_level_analysis(self): |
612 | 621 | subject_level.connect( |
613 | 622 | estimate_model, 'var_copes', data_sink, 'subject_level_analysis.@varcopes') |
614 | 623 |
615 | | - return subject_level_analysis |
| 624 | + return subject_level |
616 | 625 |
617 | 626 | def get_subject_level_outputs(self): |
618 | 627 | """ Return the names of the files the subject level analysis is supposed to generate. """ |
619 | 628 |
| 629 | + # Copes, varcopes, stats |
620 | 630 | parameters = { |
621 | 631 | 'contrast_id' : self.contrast_list, |
622 | | - 'subject_id' : self.subject_list, |
| 632 | + 'subject_ev' : range(1, 1+len(self.subject_list)) |
623 | 633 | } |
624 | 634 | parameter_sets = product(*parameters.values()) |
625 | | - output_dir = join(self.directories.output_dir, 'subject_level_analysis', |
626 | | - '_contrast_id_{contrast_id}_subject_id_{subject_id}') |
| 635 | + output_dir = join(self.directories.output_dir, 'subject_level_analysis') |
627 | 636 | templates = [ |
628 | | - join(output_dir, 'cope1.nii.gz'), |
629 | | - join(output_dir, 'tstat1.nii.gz'), |
630 | | - join(output_dir, 'varcope1.nii.gz'), |
631 | | - join(output_dir, 'zstat1.nii.gz'), |
632 | | - join(output_dir, 'sub-{subject_id}_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz') |
| 637 | + join(output_dir, '_contrast_id_{contrast_id}', 'cope{subject_ev}.nii.gz'), |
| 638 | + join(output_dir, '_contrast_id_{contrast_id}', 'tstat{subject_ev}.nii.gz'), |
| 639 | + join(output_dir, '_contrast_id_{contrast_id}', 'varcope{subject_ev}.nii.gz'), |
| 640 | + join(output_dir, '_contrast_id_{contrast_id}', 'zstat{subject_ev}.nii.gz') |
633 | 641 | ] |
634 | | - |
635 | | - return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ |
| 642 | + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ |
636 | 643 | for parameter_values in parameter_sets for template in templates] |
637 | 644 |
| 645 | + # Mask |
| 646 | + return_list.append(join(output_dir, |
| 647 | + f'sub-{self.subject_list[0]}_task-MGT_run-{self.run_list[0]}_bold_space-MNI152NLin2009cAsym_brainmask_merged_maths.nii.gz' |
| 648 | + )) |
| 649 | + |
| 650 | + return return_list |
| 651 | + |
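To make the new enumeration concrete, a small sketch with assumed contrast and subject lists showing the kind of paths the method now returns (copes only, for brevity):

from itertools import product
from os.path import join

contrast_list = ['1', '2']      # assumed example values
subject_list = ['001', '002']
output_dir = join('output_dir', 'subject_level_analysis')

parameters = {
    'contrast_id': contrast_list,
    'subject_ev': range(1, 1 + len(subject_list))
}
templates = [join(output_dir, '_contrast_id_{contrast_id}', 'cope{subject_ev}.nii.gz')]
print([template.format(**dict(zip(parameters.keys(), parameter_values)))
    for parameter_values in product(*parameters.values()) for template in templates])
# prints cope1 and cope2 under both _contrast_id_1 and _contrast_id_2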
638 | 652 | def get_one_sample_t_test_regressors(subject_list: list) -> dict: |
639 | 653 | """ |
640 | 654 | Create dictionary of regressors for one sample t-test group analysis. |