diff --git a/narps_open/pipelines/team_I9D6/README.md b/narps_open/pipelines/team_I9D6/README.md new file mode 100644 index 00000000..50f113cb --- /dev/null +++ b/narps_open/pipelines/team_I9D6/README.md @@ -0,0 +1,42 @@ +# apaper_highlight_narps
Scripts related to the following paper:

  **Highlight Results, Don't Hide Them: Enhance interpretation, reduce
  biases and improve reproducibility** \
  by Paul A Taylor, Richard C Reynolds, Vince Calhoun, Javier
  Gonzalez-Castillo, Daniel A Handwerker, Peter A Bandettini, Amanda F
  Mejia, Gang Chen (2023) \
  Neuroimage 274:120138. doi: 10.1016/j.neuroimage.2023.120138 \
  https://pubmed.ncbi.nlm.nih.gov/37116766/

---------------------------------------------------------------------------
The input data comes from the NARPS project (Botvinik-Nezer et al., 2020): \
  https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7771346/ \
This paper uses both the raw, unprocessed data and the participating
teams' results, which were uploaded to NeuroVault (see the same paper
for those details).

---------------------------------------------------------------------------
Essentially all scripts here use AFNI; one also uses FreeSurfer.

The `scripts_biowulf` directory contains the main processing scripts,
including:
+ Checking the data
+ Estimating nonlinear alignment to template space and skullstripping
  with `@SSwarper`
+ Full FMRI time series processing through regression modeling and QC
  generation with `afni_proc.py`
+ Group level modeling: both voxelwise (with cluster calculations) and
  ROI-based (using `RBA`, in particular)

... and more.

The `scripts_suppl_proc_vox` directory contains supplementary scripts
for making images of the above-processed data, mainly for figure
generation.

The `scripts_suppl_proc_teams` directory contains scripts for
processing the group-level results of the original participating teams
in the NARPS project. Those public datasets were downloaded from
NeuroVault. The scripts make many images and perform some simple
similarity analyses. diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/README.txt b/narps_open/pipelines/team_I9D6/scripts_biowulf/README.txt new file mode 100644 index 00000000..d494a12c --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/README.txt @@ -0,0 +1,118 @@ +This directory contains the main processing scripts for analyzing the
raw NARPS data (task FMRI on two groups of 54 subjects each). These
scripts include preliminary checks, full single-subject processing
through regression modeling, and a couple of different forms of
group-level modeling.

These scripts were run on the NIH's Biowulf computing cluster, hence
there are considerations for batch processing with the slurm system.
Each processing step is divided into a pair of associated scripts:

+ **do_SOMETHING.tcsh**: a script that mostly contains the processing
  options and commands for a single subject, with the subject ID and
  any other relevant information passed in as command line arguments.
  Most of the lines at the top of the file set up the processing, the
  directory structure (nearly every step generates a new filetree
  called `data_SOMETHING/`), and the use of a scratch disk for
  intermediate outputs. Then the actual processing commands are run,
  followed by a bit of checking, copying results back from the scratch
  disk, verifying permissions, and exiting.

+ **run_SOMETHING.tcsh**: a script that mostly manages the group-level
  aspects of things, setting up processing over all subjects of
  interest and starting a swarm job running on the cluster (a rough
  sketch of such a wrapper is shown below).
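  (None of the run_* scripts appear in this diff, so the following is
  only a minimal sketch of what such a wrapper might look like; the
  swarm options---memory, threads, time, lscratch size---are
  illustrative assumptions, not the team's actual settings.)

      #!/bin/tcsh
      # run_02_deob.tcsh (hypothetical sketch): swarm do_02 over subjects

      set dir_basic = /data/SSCC_NARPS/globus_sync/ds001205
      set cmd_file  = swarm_02_deob.txt

      # build one do_* command line per subject
      printf "" > ${cmd_file}
      foreach sdir ( `\ls -d ${dir_basic}/sub-*` )
          set subj = `basename ${sdir}`
          echo "tcsh -xef do_02_deob.tcsh ${subj}" >> ${cmd_file}
      end

      # submit all subjects as a single swarm job
      swarm -f ${cmd_file} -g 8 -t 4 --time 4:00:00 \
          --gres=lscratch:10 --module afni --logdir ../logs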
---------------------------------------------------------------------------
The numbering in the script names helps organize the order of
processing (kind of a Dewey Decimal-esque system). Gaps between
numbers are fine---they just leave space for other processing steps to
be inserted later, as needed. Loosely, each "decade" of the numbering
corresponds to a different stage of processing:

+ the 00s are preliminary checks
+ the 10s are preliminary processing steps (parcellation,
  skullstripping and nonlinear alignment for the anatomical)
+ the 20s are afni_proc.py processing of the FMRI data
+ the 50s run some automatic quality control (QC) with
  gen_ss_review_table.py
+ the 60s run voxelwise processing with ETAC (used in previous work,
  not here, but included for fun) and 3dttest++
+ the 70s contain region-based processing, setting up for and
  eventually running RBA.

---------------------------------------------------------------------------

The script details (recall: just listing the do_* scripts, since each
run_* one just corresponds to swarming that step):

+ do_01_gtkyd.tcsh
  "Getting To Know Your Data" step, getting preliminary info about
  voxel size, number of time points, and other fun properties;
  consistency checks

+ do_02_deob.tcsh
  Deoblique the anatomical volumes (so FS output matches later
  outputs)

+ do_12_fs.tcsh
  Run FreeSurfer for anatomical parcellations

+ do_13_ssw.tcsh
  Run AFNI's @SSwarper (SSW) for skullstripping (ss) and nonlinear
  alignment (warping) to MNI template space

+ do_15_events.tcsh
  Stimulus timing file creation

+ do_22_ap_task.tcsh
  Run AFNI's afni_proc.py (AP) for full processing of the FMRI data,
  through single subject regression modeling (here, without blurring,
  to be used for ROI-based analyses); uses results of earlier stages;
  also produces QC HTML

+ do_23_ap_task_b.tcsh
  Run AFNI's afni_proc.py (AP) for full processing of the FMRI data,
  through single subject regression modeling (here, with blurring, to
  be used for voxelwise analyses); uses results of earlier stages;
  also produces QC HTML

+ do_52_ap_qc.tcsh
  Run some automatic QC criteria selections on the "22_ap_task" output
  with AFNI's gen_ss_review_table.py

+ do_53_ap_qc.tcsh
  Run some automatic QC criteria selections on the "23_ap_task_b"
  output with AFNI's gen_ss_review_table.py

+ do_61_etac_1grp.tcsh
  Run ETAC for group level analysis on the "22_ap_task" output (not
  used here; from a previous study, but script included); this applies
  to the single-group hypotheses

+ do_62_etac_2grp.tcsh
  Run ETAC for group level analysis on the "22_ap_task" output (not
  used here; from a previous study, but script included); this applies
  to the group contrast hypothesis

+ do_63_ttest_1grp.tcsh
  Run 3dttest++ for group level analysis on the "23_ap_task_b" output,
  for simple voxelwise analysis; this applies to the single-group
  hypotheses

+ do_64_csim_1grp.tcsh
  Run "3dttest++ -Clustsim ..."
for getting standard cluster threshold + size for the t-test results + ++ do_71a_rba_prep.tcsh + Prepare to run AFNI's RBA on the "22_ap_task" output, dumping + average effect estimate information for each ROI (for both of the + atlases used) + ++ do_71b_rba_comb.tcsh + Prepare to run AFNI's RBA on the "22_ap_task" output, combining ROI + information across the group into a datatable (for both of the + atlases used) + ++ do_71c_rba.tcsh + Run AFNI's RBA on the "22_ap_task" output, using the created + datatable (for both of the atlases used) diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_01_gtkyd.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_01_gtkyd.tcsh new file mode 100644 index 00000000..2454cd27 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_01_gtkyd.tcsh @@ -0,0 +1,174 @@ +#!/bin/tcsh + +# GTKYD: Getting To Know Your Data +# -> preliminary info and QC + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# important for 'clean' output here +setenv AFNI_NO_OBLIQUE_WARNING YES + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels: different than most other do*.tcsh scripts +set file_all_epi = $1 +set file_all_anat = $2 + +set template = MNI152_2009_template_SSW.nii.gz + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} # holds all sub-* dirs +set dir_gtkyd = ${dir_inroot}/data_01_gtkyd + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set all_anat = `cat ${file_all_anat}` +set all_epi = `cat ${file_all_epi}` + +# 3dinfo params +set all_info = ( n4 orient ad3 obliquity tr slice_timing \ + datum ) + +# 3dBrickStat params +set all_bstat = ( min max ) + +# nifti_tool fields +set all_nfield = ( datatype sform_code qform_code ) + + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# +# *** not used here *** +# +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +# report per data dir +\mkdir -p ${dir_gtkyd}/anat +\mkdir -p ${dir_gtkyd}/func + +# report both individual columns, and sort+uniq ones 
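# (naming convention in the loop below: each *_detail.dat file gets one
# row per dset, labeled via '3dinfo -prefix ...', while each *_su.dat
# file pipes the same values through 'sort | uniq', so any inconsistency
# across the group shows up as more than one line)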
+foreach info ( ${all_info} ) + echo "++ 3dinfo -${info} ..." + + set otxt = ${dir_gtkyd}/anat/rep_info_${info}_su.dat + echo "# 3dinfo -${info}" > ${otxt} + 3dinfo -${info} ${all_anat} | sort | uniq >> ${otxt} + + set otxt = ${dir_gtkyd}/anat/rep_info_${info}_detail.dat + echo "# 3dinfo -${info}" > ${otxt} + 3dinfo -${info} -prefix ${all_anat} >> ${otxt} + + set otxt = ${dir_gtkyd}/func/rep_info_${info}_su.dat + echo "# 3dinfo -${info}" > ${otxt} + 3dinfo -${info} ${all_epi} | sort | uniq >> ${otxt} + + set otxt = ${dir_gtkyd}/func/rep_info_${info}_detail.dat + echo "# 3dinfo -${info}" > ${otxt} + 3dinfo -${info} -prefix ${all_epi} >> ${otxt} +end + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + + +# only sort+uniq at the moment +foreach nfield ( ${all_nfield} ) + echo "++ nifti_tool -disp_hdr -field ${nfield} ..." + + set otxt = ${dir_gtkyd}/anat/rep_ntool_${nfield}_su.dat + echo "# nifti_tool -disp_hdr -field ${nfield}" > ${otxt} + nifti_tool -disp_hdr -field ${nfield} -quiet -infiles ${all_anat} \ + | sort | uniq >> ${otxt} + + set otxt = ${dir_gtkyd}/func/rep_ntool_${nfield}_su.dat + echo "# nifti_tool -disp_hdr -field ${nfield}" > ${otxt} + nifti_tool -disp_hdr -field ${nfield} -quiet -infiles ${all_epi} \ + | sort | uniq >> ${otxt} +end + +# report both individual columns, and sort+uniq ones +foreach bstat ( ${all_bstat} ) + echo "++ 3dBrickStat -slow -${bstat} ..." + + set otxt = ${dir_gtkyd}/anat/rep_brickstat_${bstat}_detail.dat + foreach dset ( ${all_anat} ) + set val = `3dBrickStat -slow -${bstat} ${dset}` + set name = `3dinfo -prefix ${dset}` + printf "%12s %12s\n" "${val}" "${name}" >> ${otxt} + end + + set otxt_su = ${dir_gtkyd}/anat/rep_brickstat_${bstat}_su.dat + cat ${otxt} | awk '{print $1}' | sort -n | uniq > ${otxt_su} + + set otxt = ${dir_gtkyd}/func/rep_brickstat_${bstat}_detail.dat + foreach dset ( ${all_epi} ) + set val = `3dBrickStat -slow -${bstat} ${dset}` + set name = `3dinfo -prefix ${dset}` + printf "%12s %12s\n" "${val}" "${name}" >> ${otxt} + end + + set otxt_su = ${dir_gtkyd}/func/rep_brickstat_${bstat}_su.dat + cat ${otxt} | awk '{print $1}' | sort -n | uniq > ${otxt_su} + +end + +echo "++ done proc ok" + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# +# *** not used here *** +# +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: GTKYD (ecode = ${ecode})" +else + echo "++ GOOD FINISH: GTKYD" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_02_deob.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_02_deob.tcsh new file mode 100644 index 00000000..9659c8e0 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_02_deob.tcsh @@ -0,0 +1,150 @@ +#!/bin/tcsh + +# DEOB: deoblique the anatomical and centralize grid + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
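#
# + A quick way to verify the deobliquing below is the same
#   '3dinfo -obliquity' check used in do_01_gtkyd.tcsh; e.g., for a
#   hypothetical subject's output:
#
#       3dinfo -obliquity sub-001_T1w-deobl.nii.gz
#
#   which should report an obliquity of 0 after '3dWarp -deoblique'.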
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 + +# reference template for standard space +set template = MNI152_2009_template_SSW.nii.gz + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} # holds all sub-* dirs +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set dset_anat_raw = ${sdir_anat}/${subj}*T1w.nii.gz #${ses} +set dset_anat_00 = ${sdir_deob}/${subj}_T1w-deobl_nu.nii.gz + +# control variables + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${sdir_deob} + set sdir_deob = /lscratch/$SLURM_JOBID/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${sdir_deob} + +3dWarp \ + -deoblique \ + -wsinc5 \ + -prefix ${sdir_deob}/${subj}_T1w-deobl.nii.gz \ + -gridset ${grid_template} \ + ${dset_anat_raw} + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +3dUnifize \ + -GM \ + -clfrac 0.4 \ + -Urad 30 \ + -input ${sdir_deob}/${subj}_T1w-deobl.nii.gz \ + -prefix ${dset_anat_00} + +if ( ${status} ) then + set ecode = 2 + goto COPY_AND_EXIT +endif + +echo "++ done proc ok" + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd 
-------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_deob} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_deob}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${sdir_deob}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: DEOB (ecode = ${ecode})" +else + echo "++ GOOD FINISH: DEOB" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_12_fs.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_12_fs.tcsh new file mode 100644 index 00000000..940211b2 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_12_fs.tcsh @@ -0,0 +1,153 @@ +#!/bin/tcsh + +# FS: run FreeSurfer's recon-all and AFNI's @SUMA_Make_Spec_FS. +# -> not using '-parallel' at the moment, as it can cause issues + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni freesurfer +source $FREESURFER_HOME/SetUpFreeSurfer.csh + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set dset_anat_00 = ${sdir_deob}/${subj}_T1w-deobl_nu.nii.gz #/${ses} + +# control variables + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = 
${sdir_fs}
    set sdir_fs = /lscratch/$SLURM_JOBID/${subj}    #_${ses}

    # prep for group permission reset
    \mkdir -p ${sdir_BW}
    set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'`
else
    set usetemp = 0
endif
# ---------------------------------------------------------------------------

# ---------------------------------------------------------------------------
# run programs
# ---------------------------------------------------------------------------

\mkdir -p ${sdir_fs}

# note: '-parallel' is not used here (see NOTES at top); a commented-out
# option cannot sit inside the line-continued command below, since it
# would truncate the command
time recon-all \
    -all \
    -3T \
    -sd "${sdir_fs}" \
    -subjid "${subj}" \
    -i "${dset_anat_00}"

if ( ${status} ) then
    set ecode = 1
    goto COPY_AND_EXIT
endif

# compress path (because of recon-all output dir naming):
# move output from DIR/${subj}/${ses}/${subj}/* to DIR/${subj}/${ses}/*
\mv ${sdir_fs}/${subj}/* ${sdir_fs}/.
\rmdir ${sdir_fs}/${subj}

@SUMA_Make_Spec_FS \
    -fs_setup \
    -NIFTI \
    -sid "${subj}" \
    -fspath "${sdir_fs}"

if ( ${status} ) then
    set ecode = 2
    goto COPY_AND_EXIT
endif

echo "++ done proc ok"

# ---------------------------------------------------------------------------

COPY_AND_EXIT:

# ----------------------------- biowulf-cmd --------------------------------
# copy back from /lscratch to "real" location
if( ${usetemp} && -d ${sdir_fs} ) then
    echo "++ Used /lscratch"
    echo "++ Copy from: ${sdir_fs}"
    echo "          to: ${sdir_BW}"
    \mkdir -p ${sdir_BW}
    \cp -pr ${sdir_fs}/* ${sdir_BW}/.

    # reset group permission
    chgrp -R ${grp_own} ${sdir_BW}
    chmod -R g+w ${sdir_BW}
endif
# ---------------------------------------------------------------------------

if ( ${ecode} ) then
    echo "++ BAD FINISH: FS (ecode = ${ecode})"
else
    echo "++ GOOD FINISH: FS"
endif

exit ${ecode}

diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_13_ssw.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_13_ssw.tcsh new file mode 100644 index 00000000..8256cf74 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_13_ssw.tcsh @@ -0,0 +1,133 @@ +#!/bin/tcsh

# SSW: run @SSwarper to skullstrip (SS) and estimate a nonlinear warp.

# NOTES
#
# + This is a Biowulf script (has slurm stuff)
# + Run this script in the scripts/ dir, via the corresponding run_*tcsh
# + NO session level here.
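#
# + The outputs of this step that are consumed by the later
#   afni_proc.py scripts (do_22, do_23) are:
#       anatSS.${subj}.nii       : skullstripped anatomical
#       anatU.${subj}.nii        : skull-on anatomical (used as follower)
#       anatQQ.${subj}.nii, anatQQ.${subj}.aff12.1D,
#       anatQQ.${subj}_WARP.nii  : the nonlinear warp results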
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} + +# set extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set dset_anat_00 = ${sdir_deob}/${subj}_T1w-deobl_nu.nii.gz #/${ses} + +# control variables + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${sdir_ssw} + set sdir_ssw = /lscratch/$SLURM_JOBID/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +time @SSwarper \ + -base "${template}" \ + -subid "${subj}" \ + -input "${dset_anat_00}" \ + -cost_nl_final lpa \ + -unifize_off \ + -odir "${sdir_ssw}" + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +echo "++ done proc ok" + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_ssw} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_ssw}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${sdir_ssw}/* ${sdir_BW}/. 
+ + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: SSW (ecode = ${ecode})" +else + echo "++ GOOD FINISH: SSW" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_15_events.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_15_events.tcsh new file mode 100644 index 00000000..1fec77f2 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_15_events.tcsh @@ -0,0 +1,174 @@ +#!/bin/tcsh + +# EVENTS: create stim events files + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_timing = ${sdir_epi} +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +set sdir_events = ${dir_events}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set taskname = MGT +set all_tfile = ( ${sdir_timing}/${subj}_task-${taskname}_run-0{1,2,3,4}_events.tsv ) + +set all_class = ( Resp NoResp ) + +# control variables + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${sdir_events} + set sdir_events = /lscratch/$SLURM_JOBID/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + 
set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${sdir_events} + +# create both duration modulated and non-modulated timing files +set tempfile = ${sdir_events}/tmp.awk.txt + +foreach class ( ${all_class} ) + set oname = ${sdir_events}/times.$class.txt + printf "" > ${oname} + + foreach tfile ( ${all_tfile} ) + + # for NoResp, just use 4s events, without modulation + if ( "${class}" == "NoResp" ) then + awk '{if($6 == "NoResp") printf "%s:%s ", $1, $2}' \ + ${tfile} >! ${tempfile} + else + awk '{if($2 == 4 && $6 != "NoResp") \ + printf "%s*%s*%s:%s ", $1, $3, $4, $5}' \ + ${tfile} >! ${tempfile} + endif + set nc = `cat ${tempfile} | wc -w` + + if ( ${nc} == 0 ) then + echo "-1:1 -1:1" >> ${oname} + else if ( ${nc} == 1 ) then + echo "`cat ${tempfile}` -1:1" >> ${oname} + else + echo "`cat ${tempfile}`" >> ${oname} + endif + end + echo "++ created timing file: ${oname}" +end + +\rm -f ${tempfile} + + +# and make an event_list file, for easy perusal +cd ${sdir_events} +timing_tool.py \ + -multi_timing times.*.txt \ + -multi_timing_to_event_list GE:ALL events.txt +cd - + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +echo "++ done proc ok" + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_events} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_events}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${sdir_events}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: EVENTS (ecode = ${ecode})" +else + echo "++ GOOD FINISH: EVENTS" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_22_ap_task.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_22_ap_task.tcsh new file mode 100644 index 00000000..a754174c --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_22_ap_task.tcsh @@ -0,0 +1,299 @@ +#!/bin/tcsh + +# AP_TASK: full task-based processing (voxelwise) +# with local EPI unifize + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
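#
# + Timing format note: the times.Resp.txt files created by do_15 use
#   AFNI's "married" timing format, onset*amp1*amp2:duration. Assuming
#   the NARPS events.tsv column order (onset, duration, gain, loss, RT,
#   response), a hypothetical Resp event at onset 10.5 s with gain 24,
#   loss 12 and a 1.8 s response time would appear as:
#
#       10.5*24*12:1.8
#
#   pairing with '-regress_stim_types AM2' and the dmUBLOCK(-1) basis
#   used below.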
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 +set ap_label = 22_ap_task + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_timing = ${sdir_epi} +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +set sdir_events = ${dir_events}/${subj} #/${ses} +set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set dsets_epi = ( ${sdir_epi}/${subj}_*task*bold.nii* ) + +set dset_anat_00 = ${sdir_deob}/${subj}_T1w-deobl_nu.nii.gz #/${ses} +set anat_cp = ${sdir_ssw}/anatSS.${subj}.nii +set anat_skull = ${sdir_ssw}/anatU.${subj}.nii + +set dsets_NL_warp = ( ${sdir_ssw}/anatQQ.${subj}.nii \ + ${sdir_ssw}/anatQQ.${subj}.aff12.1D \ + ${sdir_ssw}/anatQQ.${subj}_WARP.nii ) + +# might not always use these +set mask_wm = ${sdir_suma}/fs_ap_wm.nii.gz +set roi_all_2000 = ${sdir_suma}/aparc+aseg_REN_all.nii.gz +set roi_gmr_2000 = ${sdir_suma}/aparc+aseg_REN_gmrois.nii.gz + +set timing_files = ( ${sdir_events}/times.{Resp,NoResp}.txt ) +set stim_classes = ( Resp NoResp ) + +# control variables +###set nt_rm = 0 +###set blur_size = 6 +set final_dxyz = 2.0 +set cen_motion = 0.3 +set cen_outliers = 0.05 + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${sdir_ap} + set sdir_ap = /lscratch/$SLURM_JOBID/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set 
grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'`
else
    set usetemp = 0
endif
# ---------------------------------------------------------------------------

# ---------------------------------------------------------------------------
# run programs
# ---------------------------------------------------------------------------

set ap_cmd = ${sdir_ap}/ap.cmd.${subj}

\mkdir -p ${sdir_ap}

# write AP command to file
cat << EOF >! ${ap_cmd}

# Some notes on afni_proc.py (AP) option choices:
#
# **No** blur block is used here, because this is processing for ROI-based
# analysis (and could be used for ETAC, too, say). Blurring should not
# be used for ROI-based analysis. See the 'do_23*.tcsh' script for the
# related processing that does include blurring (for standard voxelwise
# analysis).
#
# This adds useful APQC HTML items, radial correlation images of initial
# and volume-registered data (might see artifacts):
#     -radial_correlate_blocks tcat volreg
#
# Even though we load the skullstripped anatomical (proc'ed by @SSwarper),
# having the original, skull-on dataset brought along as a follower dset
# can be useful for verifying EPI-anatomical alignment if the CSF is bright:
#     -anat_follower anat_w_skull anat \${anat_skull}
#
# Generally recommended to run @SSwarper prior to afni_proc.py for
# skullstripping (SS) the anatomical and estimating nonlinear warp to
# template; then provide those results in options here:
#     -copy_anat \${anat_cp}
#     ...
#     -tlrc_base \${template}
#     -tlrc_NL_warp
#     -tlrc_NL_warped_dsets \${dsets_NL_warp}
#
# This option can help improve EPI-anatomical alignment, esp. if the EPI
# has brightness inhomogeneity (and it doesn't seem to hurt alignment even
# if that is not the case); generally recommended with human FMRI
# data processing nowadays:
#     -align_unifize_epi local
#
# Generally recommended starting point for EPI-anatomical alignment in human
# FMRI proc (left-right flipping can still occur...):
#     -align_opts_aea -cost lpc+ZZ -giant_move -check_flip
#
# Which EPI should be a consistently good choice to serve as a
# reference for both motion correction and EPI-anatomical alignment?
+# The one with the fewest outliers sounds good: +# -volreg_align_to MIN_OUTLIER +# +# Add a post-volreg TSNR plot to the APQC HTML: +# -volreg_compute_tsnr yes +# +# Create useful mask from EPI-anatomical mask intersection (not applied +# to the EPI data here, but used to identify brain region): +# -mask_epi_anat yes +# +# Choose this shape and scaling for the regression basis; the '-1' in the +# argument means that an event with 1 s duration is scaled to 1; the choice +# of number is based on typical event duration: +# -regress_basis_multi "dmUBLOCK(-1)" +# +# Try to use Python's Matplotlib module when making the APQC HTML doc, for +# prettier (and more informative) plots; this is actually the default now: +# -html_review_style pythonic +# + + +afni_proc.py \ + -subj_id ${subj} \ + -blocks tshift align tlrc volreg mask scale regress \ + -radial_correlate_blocks tcat volreg \ + -copy_anat ${anat_cp} \ + -anat_has_skull no \ + -anat_follower anat_w_skull anat ${anat_skull} \ + -anat_follower_ROI a00all anat ${roi_all_2000} \ + -anat_follower_ROI e00all epi ${roi_all_2000} \ + -anat_follower_ROI a00gmr anat ${roi_gmr_2000} \ + -anat_follower_ROI e00gmr epi ${roi_gmr_2000} \ + -anat_follower_ROI eWMe epi ${mask_wm} \ + -anat_follower_erode eWMe \ + -dsets ${dsets_epi} \ + -tcat_remove_first_trs 0 \ + -tshift_opts_ts -tpattern alt+z2 \ + -align_unifize_epi local \ + -align_opts_aea -cost lpc+ZZ \ + -giant_move \ + -check_flip \ + -tlrc_base ${template} \ + -tlrc_NL_warp \ + -tlrc_NL_warped_dsets ${dsets_NL_warp} \ + -volreg_align_to MIN_OUTLIER \ + -volreg_align_e2a \ + -volreg_tlrc_warp \ + -volreg_compute_tsnr yes \ + -volreg_warp_dxyz ${final_dxyz} \ + -mask_epi_anat yes \ + -test_stim_files no \ + -regress_stim_times ${timing_files} \ + -regress_stim_labels ${stim_classes} \ + -regress_stim_types AM2 AM1 \ + -regress_basis_multi "dmUBLOCK(-1)" \ + -regress_motion_per_run \ + -regress_anaticor_fast \ + -regress_anaticor_fwhm 20 \ + -regress_anaticor_label eWMe \ + -regress_censor_motion ${cen_motion} \ + -regress_censor_outliers ${cen_outliers} \ + -regress_compute_fitts \ + -regress_opts_3dD -jobs ${njobs} \ + -num_glt 1 \ + -gltsym 'SYM: Resp[1] -Resp[2]' \ + -glt_label 1 gain-loss \ + -GOFORIT 10 \ + -regress_3dD_stop \ + -regress_reml_exec \ + -regress_opts_reml -GOFORIT \ + -regress_make_ideal_sum sum_ideal.1D \ + -regress_make_corr_vols eWMe \ + -regress_est_blur_errts \ + -regress_run_clustsim no \ + -html_review_style pythonic + +EOF + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +cd ${sdir_ap} + +# execute AP command to make processing script +tcsh -xef ${ap_cmd} |& tee output.ap.cmd.${subj} + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +# execute the proc script, saving text info +time tcsh -xef proc.${subj} |& tee output.proc.${subj} + +if ( ${status} ) then + echo "++ FAILED AP: ${ap_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED AP: ${ap_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_ap} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_ap}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${sdir_ap}/* ${sdir_BW}/. 
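    # (note: the leading backslash bypasses any user alias for cp, and
    # '-pr' preserves mode/timestamps while recursing through results)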
+ + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: AP (ecode = ${ecode})" +else + echo "++ GOOD FINISH: AP" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_23_ap_task_b.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_23_ap_task_b.tcsh new file mode 100644 index 00000000..b36a3d76 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_23_ap_task_b.tcsh @@ -0,0 +1,304 @@ +#!/bin/tcsh + +# AP_TASK: full task-based processing (voxelwise), with blurring +# with local EPI unifize + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +#set ses = $2 +set ap_label = 23_ap_task_b + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_timing = ${sdir_epi} +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +set sdir_events = ${dir_events}/${subj} #/${ses} +set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set dsets_epi = ( ${sdir_epi}/${subj}_*task*bold.nii* ) + +set dset_anat_00 = ${sdir_deob}/${subj}_T1w-deobl_nu.nii.gz #/${ses} +set anat_cp = ${sdir_ssw}/anatSS.${subj}.nii +set anat_skull = ${sdir_ssw}/anatU.${subj}.nii + +set dsets_NL_warp = ( ${sdir_ssw}/anatQQ.${subj}.nii \ + ${sdir_ssw}/anatQQ.${subj}.aff12.1D \ + ${sdir_ssw}/anatQQ.${subj}_WARP.nii ) + +# might not always use these +set mask_wm = ${sdir_suma}/fs_ap_wm.nii.gz +set roi_all_2000 = ${sdir_suma}/aparc+aseg_REN_all.nii.gz +set roi_gmr_2000 = ${sdir_suma}/aparc+aseg_REN_gmrois.nii.gz + +set timing_files = ( 
${sdir_events}/times.{Resp,NoResp}.txt )
set stim_classes = ( Resp NoResp )

# control variables
###set nt_rm      = 0
set blur_size      = 4.0
set final_dxyz     = 2.0
set cen_motion     = 0.3
set cen_outliers   = 0.05

# one way of many ways to set to available number of CPUs:
# afni_check_omp respects ${OMP_NUM_THREADS}
set njobs = `afni_check_omp`

# check available N_threads and report what is being used
set nthr_avail = `afni_system_check.py -disp_num_cpu`
set nthr_using = `afni_check_omp`

echo "++ INFO: Using ${nthr_using} of ${nthr_avail} available threads"

# ----------------------------- biowulf-cmd --------------------------------
# try to use /lscratch for speed
if ( -d /lscratch/$SLURM_JOBID ) then
    set usetemp = 1
    set sdir_BW = ${sdir_ap}
    set sdir_ap = /lscratch/$SLURM_JOBID/${subj}    #_${ses}

    # prep for group permission reset
    \mkdir -p ${sdir_BW}
    set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'`
else
    set usetemp = 0
endif
# ---------------------------------------------------------------------------

# ---------------------------------------------------------------------------
# run programs
# ---------------------------------------------------------------------------

set ap_cmd = ${sdir_ap}/ap.cmd.${subj}

\mkdir -p ${sdir_ap}

# write AP command to file
cat << EOF >! ${ap_cmd}

# Some notes on afni_proc.py (AP) option choices:
#
# The blur block is used here, because this is processing for voxelwise
# analysis. See the 'do_22*.tcsh' script for the related processing that
# does **not** include blurring (for ROI-based analysis). The blur size
# is set to be about 2 times the average voxel edge length.
#
# This adds useful APQC HTML items, radial correlation images of initial
# and volume-registered data (might see artifacts):
#     -radial_correlate_blocks tcat volreg
#
# Even though we load the skullstripped anatomical (proc'ed by @SSwarper),
# having the original, skull-on dataset brought along as a follower dset
# can be useful for verifying EPI-anatomical alignment when the bright
# CSF extends outside the brain:
#     -anat_follower anat_w_skull anat \${anat_skull}
#
# Generally recommended to run @SSwarper prior to afni_proc.py for
# skullstripping (SS) the anatomical and estimating nonlinear warp to
# template; then provide those results in options here:
#     -copy_anat \${anat_cp}
#     ...
#     -tlrc_base \${template}
#     -tlrc_NL_warp
#     -tlrc_NL_warped_dsets \${dsets_NL_warp}
#
# This option can help improve EPI-anatomical alignment, esp. if the EPI
# has brightness inhomogeneity (and it doesn't seem to hurt alignment even
# if that is not the case); generally recommended with human FMRI
# data processing nowadays:
#     -align_unifize_epi local
#
# Generally recommended starting point for EPI-anatomical alignment in human
# FMRI proc (left-right flipping can still occur...):
#     -align_opts_aea -cost lpc+ZZ -giant_move -check_flip
#
# Which EPI should be a consistently good choice to serve as a
# reference for both motion correction and EPI-anatomical alignment?
+# The one with the fewest outliers (and so low motion) sounds good: +# -volreg_align_to MIN_OUTLIER +# +# Add a post-volreg TSNR plot to the APQC HTML: +# -volreg_compute_tsnr yes +# +# Create useful mask from EPI-anatomical mask intersection (not applied +# to the EPI data here, but used to identify brain region): +# -mask_epi_anat yes +# +# Compute a time series that is the sum of all non-baseline regressors, +# for QC visualization: +# -regress_make_ideal_sum sum_ideal.1D +# +# Choose this shape and scaling for the regression basis; the '-1' in the +# argument means that an event with 1 s duration is scaled to 1; the choice +# of number is based on typical or average event duration: +# -regress_basis_multi "dmUBLOCK(-1)" +# +# Try to use Python's Matplotlib module when making the APQC HTML doc, for +# prettier (and more informative) plots; this is actually the default now: +# -html_review_style pythonic +# + + +afni_proc.py \ + -subj_id ${subj} \ + -blocks tshift align tlrc volreg blur mask scale regress \ + -radial_correlate_blocks tcat volreg \ + -copy_anat ${anat_cp} \ + -anat_has_skull no \ + -anat_follower anat_w_skull anat ${anat_skull} \ + -anat_follower_ROI a00all anat ${roi_all_2000} \ + -anat_follower_ROI e00all epi ${roi_all_2000} \ + -anat_follower_ROI a00gmr anat ${roi_gmr_2000} \ + -anat_follower_ROI e00gmr epi ${roi_gmr_2000} \ + -anat_follower_ROI eWMe epi ${mask_wm} \ + -anat_follower_erode eWMe \ + -dsets ${dsets_epi} \ + -tcat_remove_first_trs 0 \ + -tshift_opts_ts -tpattern alt+z2 \ + -align_unifize_epi local \ + -align_opts_aea -cost lpc+ZZ \ + -giant_move \ + -check_flip \ + -tlrc_base ${template} \ + -tlrc_NL_warp \ + -tlrc_NL_warped_dsets ${dsets_NL_warp} \ + -volreg_align_to MIN_OUTLIER \ + -volreg_align_e2a \ + -volreg_tlrc_warp \ + -volreg_compute_tsnr yes \ + -volreg_warp_dxyz ${final_dxyz} \ + -blur_size ${blur_size} \ + -mask_epi_anat yes \ + -test_stim_files no \ + -regress_stim_times ${timing_files} \ + -regress_stim_labels ${stim_classes} \ + -regress_stim_types AM2 AM1 \ + -regress_basis_multi "dmUBLOCK(-1)" \ + -regress_motion_per_run \ + -regress_anaticor_fast \ + -regress_anaticor_fwhm 20 \ + -regress_anaticor_label eWMe \ + -regress_censor_motion ${cen_motion} \ + -regress_censor_outliers ${cen_outliers} \ + -regress_compute_fitts \ + -regress_opts_3dD -jobs ${njobs} \ + -num_glt 1 \ + -gltsym 'SYM: Resp[1] -Resp[2]' \ + -glt_label 1 gain-loss \ + -GOFORIT 10 \ + -regress_3dD_stop \ + -regress_reml_exec \ + -regress_opts_reml -GOFORIT \ + -regress_make_ideal_sum sum_ideal.1D \ + -regress_make_corr_vols eWMe \ + -regress_est_blur_errts \ + -regress_run_clustsim no \ + -html_review_style pythonic + +EOF + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +cd ${sdir_ap} + +# execute AP command to make processing script +tcsh -xef ${ap_cmd} |& tee output.ap.cmd.${subj} + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +# execute the proc script, saving text info +time tcsh -xef proc.${subj} |& tee output.proc.${subj} + +if ( ${status} ) then + echo "++ FAILED AP: ${ap_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED AP: ${ap_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_ap} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_ap}" + echo " to: ${sdir_BW}" + 
\mkdir -p ${sdir_BW} + \cp -pr ${sdir_ap}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: AP (ecode = ${ecode})" +else + echo "++ GOOD FINISH: AP" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_52_ap_qc.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_52_ap_qc.tcsh new file mode 100644 index 00000000..811bce62 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_52_ap_qc.tcsh @@ -0,0 +1,340 @@ +#!/bin/tcsh + +# QC: GSSRT QC, inclusion/exclusion criteria for subj +# will output into the particular AP dir, hence use ap_label + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +#set subj = $1 # subj not used here +#set ses = $2 +set ap_label = 22_ap_task + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +set dir_grpqc = ${dir_ap}/QC + +# subject directories +#set sdir_basic = ${dir_basic}/${subj} #/${ses} +#set sdir_epi = ${sdir_basic}/func +#set sdir_anat = ${sdir_basic}/anat +#set sdir_timing = ${sdir_epi} +#set sdir_deob = ${dir_deob}/${subj} #/${ses} +#set sdir_fs = ${dir_fs}/${subj} #/${ses} +#set sdir_suma = ${sdir_fs}/SUMA +#set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +#set sdir_events = ${dir_events}/${subj} #/${ses} +#set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +# for globbing participants.tsv +set all_grp = ( equalRange equalIndif ) + +# control variables +set part_file = ${dir_store}/participants.tsv + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using 
${nthr_using} of ${nthr_avail} available threads"

# ----------------------------- biowulf-cmd --------------------------------
# try to use /lscratch for speed
if ( -d /lscratch/$SLURM_JOBID ) then
    set usetemp   = 1
    set sdir_BW   = ${dir_grpqc}
    set dir_grpqc = /lscratch/$SLURM_JOBID    #/${subj}    #_${ses}

    # prep for group permission reset
    \mkdir -p ${sdir_BW}
    set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'`
else
    set usetemp = 0
endif
# ---------------------------------------------------------------------------

# ---------------------------------------------------------------------------
# run programs
# ---------------------------------------------------------------------------

\mkdir -p ${dir_grpqc}

set gssrt_cmd = ${dir_grpqc}/do_gssrt.tcsh

# write GSSRT QC script to file
cat << EOF >! ${gssrt_cmd}
#!/bin/tcsh

echo "++ Start GSSRT script"

set dir_ap      = ${dir_ap}
set all_infiles = ( \${dir_ap}/sub*/s*.results/out.ss*.txt )

echo "++ Found \${#all_infiles} out.ss*.txt review files"

# a first look for bad subjects
gen_ss_review_table.py \
    -outlier_sep space \
    -report_outliers 'AFNI version' VARY \
    -report_outliers 'censor fraction' GE 0.1 \
    -report_outliers 'average censored motion' GE 0.1 \
    -report_outliers 'max censored displacement' GE 8 \
    -report_outliers 'num regs of interest' NE 4 \
    -report_outliers 'TSNR average' LT 30 \
    -report_outliers 'final voxel resolution' NE 2 \
    -report_outliers 'num TRs per run' NE 453 \
    -infiles \${all_infiles} \
    -write_outliers outliers.a.long.txt \
    -overwrite

gen_ss_review_table.py \
    -outlier_sep space \
    -report_outliers 'AFNI version' VARY \
    -report_outliers 'num regs of interest' VARY \
    -report_outliers 'final voxel resolution' VARY \
    -report_outliers 'num TRs per run' VARY \
    -infiles \${all_infiles} \
    -write_outliers outliers.a.VARY.txt \
    -overwrite

# ** the one that will be used for incl/excl in this study **
gen_ss_review_table.py \
    -outlier_sep space \
    -report_outliers 'censor fraction' GE 0.1 \
    -report_outliers 'average censored motion' GE 0.1 \
    -report_outliers 'max censored displacement' GE 8 \
    -infiles \${all_infiles} \
    -write_outliers outliers.b.short.txt \
    -overwrite

# ============================================================================
# list bad subj to drop

set bad_subs = ( \`awk '{if (NR>2) print \$1}' outliers.b.short.txt\` )
awk '{if (NR>2) print \$1}' outliers.b.short.txt \
    > outliers.c.drop.subs.txt
echo ""
echo "=== subjects to drop: \${bad_subs}"
echo ""

# ============================================================================
# generate review table spreadsheet

echo "====== generate review table and label list"
gen_ss_review_table.py \
    -tablefile ss_review_table.xls \
    -infiles \${all_infiles}

# ...
and note labels +gen_ss_review_table.py \ + -showlabs \ + -infiles \${all_infiles} \ + >& ss_review_labels.txt + +# ============================================================================ +# ACF params and average + +echo "====== collect and average ACF parameters" + +# generate review table spreadsheet +grep -h ACF \${all_infiles} \ + | awk -F: '{print \$2}' \ + > out.ACF.vals.1D + +grep -h ACF \${all_infiles} \ + | awk -F: '{print \$2}' \ + | 3dTstat -mean -prefix - 1D:stdin\\' \ + > out.ACF.means.1D + +# ============================================================================ +# masks + +echo "====== making intersection, mean and 70% masks" + +set all_mask = ( \${dir_ap}/sub*/s*.results/mask_epi_anat*.HEAD ) + +echo "++ Found \${#all_mask} mask_epi_anat*.HEAD dsets" + +3dTstat \ + -mean \ + -prefix mask.mean.nii.gz \ + "\${all_mask}" + +3dmask_tool \ + -prefix group_mask.7.nii.gz \ + -frac 0.7 \ + -input \${all_mask} + +3dmask_tool \ + -prefix group_mask.inter.nii.gz \ + -frac 1.0 \ + -input \${all_mask} + +set group_mask = group_mask.inter.nii.gz + +# ============================================================================ +# get anats and EPI for registration comparison + +echo "====== making mean and TCAT anat and EPI dsets" + +set all_epi_vr = ( \${dir_ap}/sub*/s*.results/final_epi_vr*.HEAD ) +set all_anat_final = ( \${dir_ap}/sub*/s*.results/anat_final*.HEAD ) + +echo "++ Found \${#all_epi_vr} final_epi_vr*.HEAD dsets" +echo "++ Found \${#all_anat_final} anat_final*.HEAD" + +# TR opt just to quiet warnings +3dTcat \ + -tr 1 \ + -prefix all.EPI.vr.tcat \ + \${all_epi_vr} + +3dTcat \ + -tr 1 \ + -prefix all.anat.final.tcat \ + \${all_anat_final} + +3dTstat \ + -mean \ + -prefix all.EPI.mean \ + "\${all_epi_vr}" + +3dTstat \ + -mean \ + -prefix all.anat.mean \ + "\${all_anat_final}" + + +# might make a probability map of FS ROIs, but do it in another script + +# ============================================================================ +# clustsim? 
Maybe not in the case of ETAC with no blur
+
+# *** not including here at the moment ***
+
+
+echo "====== done here"
+
+exit 0
+
+EOF
+
+if ( ${status} ) then
+    set ecode = 1
+    goto COPY_AND_EXIT
+endif
+
+cd ${dir_grpqc}
+
+# execute the GSSRT script
+tcsh -ef ${gssrt_cmd} |& tee log_gssrt_cmd.txt
+
+if ( ${status} ) then
+    set ecode = 1
+    goto COPY_AND_EXIT
+endif
+
+# make lists of subj IDs per group, after applying drop criteria
+
+foreach grp ( ${all_grp} )
+
+    # create list of all subj in this group
+    grep --color=never "${grp}" ${part_file} \
+        | awk '{print $1}' \
+        > list_grp_${grp}_all.txt
+
+    # create list of remainder after applying drop rules
+    set f1 = outliers.c.drop.subs.txt
+    set f2 = list_grp_${grp}_all.txt
+    bash -c "comm -13 <(sort ${f1}) <(sort ${f2})" \
+        > list_grp_${grp}_final.txt
+
+    set nsubj_all = `cat list_grp_${grp}_all.txt | wc -l`
+    set nsubj_fin = `cat list_grp_${grp}_final.txt | wc -l`
+    echo "++ Final ${grp} list has ${nsubj_fin} in it (from init ${nsubj_all})"
+end
+
+
+if ( ${status} ) then
+    echo "++ FAILED QC: ${ap_label}"
+    set ecode = 1
+    goto COPY_AND_EXIT
+else
+    echo "++ FINISHED QC: ${ap_label}"
+endif
+
+# ---------------------------------------------------------------------------
+
+COPY_AND_EXIT:
+
+# ----------------------------- biowulf-cmd --------------------------------
+# copy back from /lscratch to "real" location
+if( ${usetemp} && -d ${dir_grpqc} ) then
+    echo "++ Used /lscratch"
+    echo "++ Copy from: ${dir_grpqc}"
+    echo "          to: ${sdir_BW}"
+    \mkdir -p ${sdir_BW}
+    \cp -pr ${dir_grpqc}/* ${sdir_BW}/.
+
+    # reset group permission
+    chgrp -R ${grp_own} ${sdir_BW}
+    chmod -R g+w ${sdir_BW}
+endif
+# ---------------------------------------------------------------------------
+
+if ( ${ecode} ) then
+    echo "++ BAD FINISH: QC (ecode = ${ecode})"
+else
+    echo "++ GOOD FINISH: QC"
+endif
+
+exit ${ecode}
+
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_53_ap_qc.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_53_ap_qc.tcsh
new file mode 100644
index 00000000..385df7bd
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_53_ap_qc.tcsh
@@ -0,0 +1,341 @@
+#!/bin/tcsh
+
+# QC: GSSRT QC, inclusion/exclusion criteria for subj
+#     will output into the particular AP dir, hence use ap_label
+#     -> for the 23_ap proc
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, via the corresponding run_*tcsh
+# + NO session level here.
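+#
+# As in do_52_ap_qc.tcsh, three gen_ss_review_table.py screens are run
+# below; only the third one (outliers.b.short.txt, the censor/motion
+# criteria) actually drives subject inclusion/exclusion, while the first
+# two tables are informational.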
+
+# ----------------------------- biowulf-cmd ---------------------------------
+# load modules
+source /etc/profile.d/modules.csh
+module load afni
+
+# set N_threads for OpenMP
+# + consider using up to 4 threads, because of "-parallel" in recon-all
+setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK
+
+# compress BRIK files
+setenv AFNI_COMPRESSOR GZIP
+
+# initial exit code; we don't exit at fail, to copy partial results back
+set ecode = 0
+# ---------------------------------------------------------------------------
+
+# ---------------------------------------------------------------------------
+# top level definitions (constant across demo)
+# ---------------------------------------------------------------------------
+
+# labels
+#set subj = $1              # subj not used here
+#set ses  = $2
+set ap_label = 23_ap_task_b
+
+# upper directories
+set dir_inroot = ${PWD:h}   # one dir above scripts/
+set dir_log    = ${dir_inroot}/logs
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205  # data on biowulf
+set dir_basic  = ${dir_store} #${dir_inroot}/data_00_basic
+set dir_deob   = ${dir_inroot}/data_02_deob
+set dir_fs     = ${dir_inroot}/data_12_fs
+set dir_ssw    = ${dir_inroot}/data_13_ssw
+set dir_events = ${dir_inroot}/data_15_events
+set dir_ap     = ${dir_inroot}/data_${ap_label}
+
+set dir_grpqc  = ${dir_ap}/QC
+
+# subject directories
+#set sdir_basic  = ${dir_basic}/${subj} #/${ses}
+#set sdir_epi    = ${sdir_basic}/func
+#set sdir_anat   = ${sdir_basic}/anat
+#set sdir_timing = ${sdir_epi}
+#set sdir_deob   = ${dir_deob}/${subj} #/${ses}
+#set sdir_fs     = ${dir_fs}/${subj} #/${ses}
+#set sdir_suma   = ${sdir_fs}/SUMA
+#set sdir_ssw    = ${dir_ssw}/${subj} #/${ses}
+#set sdir_events = ${dir_events}/${subj} #/${ses}
+#set sdir_ap     = ${dir_ap}/${subj} #/${ses}
+
+# extra datasets
+set dir_extra     = ${dir_inroot}/extra_dsets
+set template      = ${dir_extra}/MNI152_2009_template_SSW.nii.gz
+set grid_template = ${dir_extra}/T1.grid_template.nii.gz
+
+# --------------------------------------------------------------------------
+# data and control variables
+# --------------------------------------------------------------------------
+
+# dataset inputs
+
+# for globbing participants.tsv
+set all_grp = ( equalRange equalIndif )
+
+# control variables
+set part_file = ${dir_store}/participants.tsv
+
+# one way of many ways to set to available number of CPUs:
+# afni_check_omp respects ${OMP_NUM_THREADS}
+set njobs = `afni_check_omp`
+
+# check available N_threads and report what is being used
+set nthr_avail = `afni_system_check.py -disp_num_cpu`
+set nthr_using = `afni_check_omp`
+
+echo "++ INFO: Using ${nthr_using} of available ${nthr_avail} threads"
+
+# ----------------------------- biowulf-cmd --------------------------------
+# try to use /lscratch for speed
+if ( -d /lscratch/$SLURM_JOBID ) then
+    set usetemp   = 1
+    set sdir_BW   = ${dir_grpqc}
+    set dir_grpqc = /lscratch/$SLURM_JOBID #/${subj} #_${ses}
+
+    # prep for group permission reset
+    \mkdir -p ${sdir_BW}
+    set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'`
+else
+    set usetemp = 0
+endif
+# ---------------------------------------------------------------------------
+
+# ---------------------------------------------------------------------------
+# run programs
+# ---------------------------------------------------------------------------
+
+\mkdir -p ${dir_grpqc}
+
+set gssrt_cmd = ${dir_grpqc}/do_gssrt.tcsh
+
+# write the GSSRT command to a script file
+cat <<EOF >! 
${gssrt_cmd} +#!/bin/tcsh + +echo "++ Start GSSRT script" + +set dir_ap = ${dir_ap} +set all_infiles = ( \${dir_ap}/sub*/s*.results/out.ss*.txt ) + +echo "++ Found \${#all_infiles} out.ss*.txt review files" + +# a kind of look for bad subjects +gen_ss_review_table.py \ + -outlier_sep space \ + -report_outliers 'AFNI version' VARY \ + -report_outliers 'censor fraction' GE 0.1 \ + -report_outliers 'average censored motion' GE 0.1 \ + -report_outliers 'max censored displacement' GE 8 \ + -report_outliers 'num regs of interest' NE 4 \ + -report_outliers 'TSNR average' LT 30 \ + -report_outliers 'final voxel resolution' NE 2 \ + -report_outliers 'num TRs per run' NE 453 \ + -infiles \${all_infiles} \ + -write_outliers outliers.a.long.txt \ + -overwrite + +gen_ss_review_table.py \ + -outlier_sep space \ + -report_outliers 'AFNI version' VARY \ + -report_outliers 'num regs of interest' VARY \ + -report_outliers 'final voxel resolution' VARY \ + -report_outliers 'num TRs per run' VARY \ + -infiles \${all_infiles} \ + -write_outliers outliers.a.VARY.txt \ + -overwrite + +# ** the one that will be used for incl/excl in this study ** +gen_ss_review_table.py \ + -outlier_sep space \ + -report_outliers 'censor fraction' GE 0.1 \ + -report_outliers 'average censored motion' GE 0.1 \ + -report_outliers 'max censored displacement' GE 8 \ + -infiles \${all_infiles} \ + -write_outliers outliers.b.short.txt \ + -overwrite + +# ============================================================================ +# list bad subj to drop + +set bad_subs = ( \`awk '{if (NR>2) print \$1}' outliers.b.short.txt\` ) +awk '{if (NR>2) print \$1}' outliers.b.short.txt \ + > outliers.c.drop.subs.txt +echo "" +echo "=== subjects to drop: \${bad_subs}" +echo "" + +# ============================================================================ +# generate review table spreadsheet + +echo "====== generate review table and label list" +gen_ss_review_table.py \ + -tablefile ss_review_table.xls \ + -infiles \${all_infiles} + +# ... 
and note labels +gen_ss_review_table.py \ + -showlabs \ + -infiles \${all_infiles} \ + >& ss_review_labels.txt + +# ============================================================================ +# ACF params and average + +echo "====== collect and average ACF parameters" + +# generate review table spreadsheet +grep -h ACF \${all_infiles} \ + | awk -F: '{print \$2}' \ + > out.ACF.vals.1D + +grep -h ACF \${all_infiles} \ + | awk -F: '{print \$2}' \ + | 3dTstat -mean -prefix - 1D:stdin\\' \ + > out.ACF.means.1D + +# ============================================================================ +# masks + +echo "====== making intersection, mean and 70% masks" + +set all_mask = ( \${dir_ap}/sub*/s*.results/mask_epi_anat*.HEAD ) + +echo "++ Found \${#all_mask} mask_epi_anat*.HEAD dsets" + +3dTstat \ + -mean \ + -prefix mask.mean.nii.gz \ + "\${all_mask}" + +3dmask_tool \ + -prefix group_mask.7.nii.gz \ + -frac 0.7 \ + -input \${all_mask} + +3dmask_tool \ + -prefix group_mask.inter.nii.gz \ + -frac 1.0 \ + -input \${all_mask} + +set group_mask = group_mask.inter.nii.gz + +# ============================================================================ +# get anats and EPI for registration comparison + +echo "====== making mean and TCAT anat and EPI dsets" + +set all_epi_vr = ( \${dir_ap}/sub*/s*.results/final_epi_vr*.HEAD ) +set all_anat_final = ( \${dir_ap}/sub*/s*.results/anat_final*.HEAD ) + +echo "++ Found \${#all_epi_vr} final_epi_vr*.HEAD dsets" +echo "++ Found \${#all_anat_final} anat_final*.HEAD" + +# TR opt just to quiet warnings +3dTcat \ + -tr 1 \ + -prefix all.EPI.vr.tcat \ + \${all_epi_vr} + +3dTcat \ + -tr 1 \ + -prefix all.anat.final.tcat \ + \${all_anat_final} + +3dTstat \ + -mean \ + -prefix all.EPI.mean \ + "\${all_epi_vr}" + +3dTstat \ + -mean \ + -prefix all.anat.mean \ + "\${all_anat_final}" + + +# might make a probability map of FS ROIs, but do it in another script + +# ============================================================================ +# clustsim? 
Maybe not in the case of ETAC with no blur
+
+# *** not including here at the moment ***
+
+
+echo "====== done here"
+
+exit 0
+
+EOF
+
+if ( ${status} ) then
+    set ecode = 1
+    goto COPY_AND_EXIT
+endif
+
+cd ${dir_grpqc}
+
+# execute the GSSRT script
+tcsh -ef ${gssrt_cmd} |& tee log_gssrt_cmd.txt
+
+if ( ${status} ) then
+    set ecode = 1
+    goto COPY_AND_EXIT
+endif
+
+# make lists of subj IDs per group, after applying drop criteria
+
+foreach grp ( ${all_grp} )
+
+    # create list of all subj in this group
+    grep --color=never "${grp}" ${part_file} \
+        | awk '{print $1}' \
+        > list_grp_${grp}_all.txt
+
+    # create list of remainder after applying drop rules
+    set f1 = outliers.c.drop.subs.txt
+    set f2 = list_grp_${grp}_all.txt
+    bash -c "comm -13 <(sort ${f1}) <(sort ${f2})" \
+        > list_grp_${grp}_final.txt
+
+    set nsubj_all = `cat list_grp_${grp}_all.txt | wc -l`
+    set nsubj_fin = `cat list_grp_${grp}_final.txt | wc -l`
+    echo "++ Final ${grp} list has ${nsubj_fin} in it (from init ${nsubj_all})"
+end
+
+
+if ( ${status} ) then
+    echo "++ FAILED QC: ${ap_label}"
+    set ecode = 1
+    goto COPY_AND_EXIT
+else
+    echo "++ FINISHED QC: ${ap_label}"
+endif
+
+# ---------------------------------------------------------------------------
+
+COPY_AND_EXIT:
+
+# ----------------------------- biowulf-cmd --------------------------------
+# copy back from /lscratch to "real" location
+if( ${usetemp} && -d ${dir_grpqc} ) then
+    echo "++ Used /lscratch"
+    echo "++ Copy from: ${dir_grpqc}"
+    echo "          to: ${sdir_BW}"
+    \mkdir -p ${sdir_BW}
+    \cp -pr ${dir_grpqc}/* ${sdir_BW}/.
+
+    # reset group permission
+    chgrp -R ${grp_own} ${sdir_BW}
+    chmod -R g+w ${sdir_BW}
+endif
+# ---------------------------------------------------------------------------
+
+if ( ${ecode} ) then
+    echo "++ BAD FINISH: QC (ecode = ${ecode})"
+else
+    echo "++ GOOD FINISH: QC"
+endif
+
+exit ${ecode}
+
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_61_etac_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_61_etac_1grp.tcsh
new file mode 100644
index 00000000..9926cdf2
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_61_etac_1grp.tcsh
@@ -0,0 +1,212 @@
+#!/bin/tcsh
+
+# ETAC: 3dttest with ETAC opts for a single group
+#       will output into the particular AP dir, hence use ap_label
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, via the corresponding run_*tcsh
+# + NO session level here.
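+#
+# Usage sketch (for illustration): the two command line args read below
+# are a group label to grep from participants.tsv and a condition name,
+# e.g.:
+#
+#   tcsh do_61_etac_1grp.tcsh equalRange gain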
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp = "$1" # grep to extract from TSV +set cond = "$2" +set ap_label = 22_ap_task +set grp_label = group_analysis.ETAC.1grp.${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +set dir_grpqc = ${dir_ap}/QC +set dir_etac = ${dir_ap}/${grp_label} + +# subject directories +#set sdir_basic = ${dir_basic}/${subj} #/${ses} +#set sdir_epi = ${sdir_basic}/func +#set sdir_anat = ${sdir_basic}/anat +#set sdir_timing = ${sdir_epi} +#set sdir_deob = ${dir_deob}/${subj} #/${ses} +#set sdir_fs = ${dir_fs}/${subj} #/${ses} +#set sdir_suma = ${sdir_fs}/SUMA +#set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +#set sdir_events = ${dir_events}/${subj} #/${ses} +#set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +set mask_name = group_mask.inter.nii.gz +set all_dset_reml = ( ${dir_ap}/sub-*/*.results/stats.sub*REML+tlrc.HEAD ) + +# participants file +set part_file = ${dir_store}/participants.tsv +# all subs in input group +set all_subj = ( `grep --color=never ${grp} ${part_file} \ + | awk '{print $1}'` ) +# all subs to drop +set all_drop = ( `cat ${dir_grpqc}/outliers.c.drop.subs.txt` ) + +echo "++ Found ${#all_subj} in the initial list of subs in grp '${grp}'" +echo "++ The full (multi-group) drop list has ${#all_drop} subj" +echo "++ The full (multi-group) REML dset list has ${#all_dset_reml} files" + +# if there are subjects to drop, include such an option +if ( ${#all_drop} > 0 ) then + set drop_opt = ( -dset_sid_omit_list ${all_drop} ) +else + set drop_opt = ( ) +endif + +# control variables +set label = ${grp}.${cond} +set tt_script = run.tt.${grp}.${cond}.tcsh + +# put this here, because having the '#' creates issues with swarm execution +if ( "${cond}" == "gain" ) then + set beta = "Resp#1_Coef" +else if ( "${cond}" == "loss" ) then + set beta = "Resp#2_Coef" +else + echo "** ERROR: bad variable value for cond: '${cond}'" + exit 1 +endif + + + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# 
check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${dir_etac} + set dir_etac = /lscratch/$SLURM_JOBID #/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${dir_etac} + +# copy a couple of datasets, for convenience +\cp -p ${template} ${dir_etac}/. +3dcopy -overwrite ${dir_grpqc}/${mask_name} ${dir_etac}/ + +cd ${dir_etac} + +# list ALL subject datasets, then specify which to use/drop +gen_group_command.py \ + -command 3dttest++ \ + -write_script ${tt_script} \ + -dsets ${all_dset_reml} \ + -dset_sid_list ${all_subj} \ + ${drop_opt} \ + -subj_prefix sub- \ + -set_labels ${label} \ + -subs_betas "${beta}" \ + -verb 2 \ + -options \ + -mask ${mask_name} \ + -Clustsim \ + -ETAC -ETAC_blur 2 4 6 \ + -ETAC_opt pthr=0.01,0.005,0.002,0.001,0.0005:fpr=MUCHO \ + |& tee out.ggc + + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +tcsh -x ${tt_script} |& tee out.${tt_script} + + +if ( ${status} ) then + echo "++ FAILED ETAC: ${grp_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED ETAC: ${grp_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${dir_etac} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${dir_etac}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${dir_etac}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: ETAC (ecode = ${ecode})" +else + echo "++ GOOD FINISH: ETAC" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_62_etac_2grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_62_etac_2grp.tcsh new file mode 100644 index 00000000..ef7f0398 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_62_etac_2grp.tcsh @@ -0,0 +1,219 @@ +#!/bin/tcsh + +# ETAC: 3dttest with ETAC opts for a group contrast +# will output into the particular AP dir, hence use ap_label + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
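+#
+# Usage sketch (for illustration): args read below are the two group
+# labels to grep from participants.tsv, then a condition name, e.g.:
+#
+#   tcsh do_62_etac_2grp.tcsh equalRange equalIndif loss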
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp1 = "$1" # grep to extract from TSV +set grp2 = "$2" +set cond = "$3" +set ap_label = 22_ap_task +set grp_label = group_analysis.ETAC.2grp.${grp1}-${grp2}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +set dir_grpqc = ${dir_ap}/QC +set dir_etac = ${dir_ap}/${grp_label} + +# subject directories +#set sdir_basic = ${dir_basic}/${subj} #/${ses} +#set sdir_epi = ${sdir_basic}/func +#set sdir_anat = ${sdir_basic}/anat +#set sdir_timing = ${sdir_epi} +#set sdir_deob = ${dir_deob}/${subj} #/${ses} +#set sdir_fs = ${dir_fs}/${subj} #/${ses} +#set sdir_suma = ${sdir_fs}/SUMA +#set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +#set sdir_events = ${dir_events}/${subj} #/${ses} +#set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +set mask_name = group_mask.inter.nii.gz +set all_dset_reml = ( ${dir_ap}/sub-*/*.results/stats.sub*REML+tlrc.HEAD ) + +# participants file +set part_file = ${dir_store}/participants.tsv +# all subs in input group +set all_subj1 = ( `grep --color=never ${grp1} ${part_file} \ + | awk '{print $1}'` ) +set all_subj2 = ( `grep --color=never ${grp2} ${part_file} \ + | awk '{print $1}'` ) +# all subs to drop +set all_drop = ( `cat ${dir_grpqc}/outliers.c.drop.subs.txt` ) + +echo "++ Found ${#all_subj1} in the initial list of subs in grp1 '${grp1}'" +echo "++ Found ${#all_subj2} in the initial list of subs in grp2 '${grp2}'" +echo "++ The full (multi-group) drop list has ${#all_drop} subj" +echo "++ The full (multi-group) REML dset list has ${#all_dset_reml} files" + +# if there are subjects to drop, include such an option +if ( ${#all_drop} > 0 ) then + set drop_opt = ( -dset_sid_omit_list ${all_drop} ) +else + set drop_opt = ( ) +endif + +# control variables +set label = ${grp1}-${grp2}.${cond} +set tt_script = run.tt.${label}.tcsh + +# put this here, because having the '#' creates issues with swarm execution +if ( "${cond}" == "gain" ) then + set beta = "Resp#1_Coef" +else if ( "${cond}" == "loss" ) then + set beta = "Resp#2_Coef" +else + echo "** ERROR: bad 
variable value for cond: '${cond}'" + exit 1 +endif + + + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${dir_etac} + set dir_etac = /lscratch/$SLURM_JOBID #/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${dir_etac} + +# copy a couple of datasets, for convenience +\cp -p ${template} ${dir_etac}/. +3dcopy -overwrite ${dir_grpqc}/${mask_name} ${dir_etac}/ + +cd ${dir_etac} + +# list ALL subject datasets, then specify which to use/drop +gen_group_command.py \ + -command 3dttest++ \ + -write_script ${tt_script} \ + -dsets ${all_dset_reml} \ + -dset_sid_list ${all_subj1} \ + ${drop_opt} \ + -dsets ${all_dset_reml} \ + -dset_sid_list ${all_subj2} \ + ${drop_opt} \ + -subj_prefix sub- \ + -set_labels ${label} \ + -subs_betas "${beta}" \ + -verb 2 \ + -options \ + -mask ${mask_name} \ + -Clustsim \ + -ETAC -ETAC_blur 2 4 6 \ + -ETAC_opt pthr=0.01,0.005,0.002,0.001,0.0005:fpr=MUCHO \ + |& tee out.ggc + + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +tcsh -x ${tt_script} |& tee out.${tt_script} + + +if ( ${status} ) then + echo "++ FAILED ETAC: ${grp_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED ETAC: ${grp_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${dir_etac} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${dir_etac}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${dir_etac}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: ETAC (ecode = ${ecode})" +else + echo "++ GOOD FINISH: ETAC" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_63_ttest_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_63_ttest_1grp.tcsh new file mode 100644 index 00000000..87367d51 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_63_ttest_1grp.tcsh @@ -0,0 +1,208 @@ +#!/bin/tcsh + +# TTEST: unmasked 3dttest for a single group +# will output into the particular AP dir, hence use ap_label + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
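+#
+# Usage sketch (for illustration), with args as read below (group label,
+# condition name):
+#
+#   tcsh do_63_ttest_1grp.tcsh equalIndif gain
+#
+# gen_group_command.py below then writes run.tt.equalIndif.gain.tcsh,
+# which wraps a single 3dttest++ call over the listed subject stats dsets.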
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp = "$1" # grep to extract from TSV +set cond = "$2" +set ap_label = 23_ap_task_b +set grp_label = group_analysis.ttest.1grp.${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +set dir_grpqc = ${dir_ap}/QC +set dir_ttest = ${dir_ap}/${grp_label} + +# subject directories +#set sdir_basic = ${dir_basic}/${subj} #/${ses} +#set sdir_epi = ${sdir_basic}/func +#set sdir_anat = ${sdir_basic}/anat +#set sdir_timing = ${sdir_epi} +#set sdir_deob = ${dir_deob}/${subj} #/${ses} +#set sdir_fs = ${dir_fs}/${subj} #/${ses} +#set sdir_suma = ${sdir_fs}/SUMA +#set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +#set sdir_events = ${dir_events}/${subj} #/${ses} +#set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +set mask_name = group_mask.inter.nii.gz +set all_dset_reml = ( ${dir_ap}/sub-*/*.results/stats.sub*REML+tlrc.HEAD ) + +# participants file +set part_file = ${dir_store}/participants.tsv +# all subs in input group +set all_subj = ( `grep --color=never ${grp} ${part_file} \ + | awk '{print $1}'` ) +# all subs to drop +set all_drop = ( `cat ${dir_grpqc}/outliers.c.drop.subs.txt` ) + +echo "++ Found ${#all_subj} in the initial list of subs in grp '${grp}'" +echo "++ The full (multi-group) drop list has ${#all_drop} subj" +echo "++ The full (multi-group) REML dset list has ${#all_dset_reml} files" + +# if there are subjects to drop, include such an option +if ( ${#all_drop} > 0 ) then + set drop_opt = ( -dset_sid_omit_list ${all_drop} ) +else + set drop_opt = ( ) +endif + +# control variables +set label = ${grp}.${cond} +set tt_script = run.tt.${grp}.${cond}.tcsh + +# put this here, because having the '#' creates issues with swarm execution +if ( "${cond}" == "gain" ) then + set beta = "Resp#1_Coef" +else if ( "${cond}" == "loss" ) then + set beta = "Resp#2_Coef" +else + echo "** ERROR: bad variable value for cond: '${cond}'" + exit 1 +endif + + + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + 
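+# (njobs is set for uniformity with the other do_* scripts in this
+# directory; it is not used directly below)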
+# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${dir_ttest} + set dir_ttest = /lscratch/$SLURM_JOBID #/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${dir_ttest} + +# copy a couple of datasets, for convenience +\cp -p ${template} ${dir_ttest}/. +3dcopy -overwrite ${dir_grpqc}/${mask_name} ${dir_ttest}/ + +cd ${dir_ttest} + +# list ALL subject datasets, then specify which to use/drop +gen_group_command.py \ + -command 3dttest++ \ + -write_script ${tt_script} \ + -dsets ${all_dset_reml} \ + -dset_sid_list ${all_subj} \ + ${drop_opt} \ + -subj_prefix sub- \ + -set_labels ${label} \ + -subs_betas "${beta}" \ + -prefix ttest_${label}.nii.gz \ + -verb 2 \ + |& tee out.ggc + + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +tcsh -x ${tt_script} |& tee out.${tt_script} + + +if ( ${status} ) then + echo "++ FAILED TTEST: ${grp_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED TTEST: ${grp_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${dir_ttest} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${dir_ttest}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${dir_ttest}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: TTEST (ecode = ${ecode})" +else + echo "++ GOOD FINISH: TTEST" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_64_csim_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_64_csim_1grp.tcsh new file mode 100644 index 00000000..53ad3747 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_64_csim_1grp.tcsh @@ -0,0 +1,210 @@ +#!/bin/tcsh + +# CSIM: 3dttest with Clustsim opts for a single group +# will output into the particular AP dir, hence use ap_label + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
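+#
+# Background note: with "-Clustsim", 3dttest++ reruns the t-tests on
+# randomized/permuted residuals to build cluster-size threshold tables,
+# which are attached to the header of the output stats dset.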
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +# + consider using up to 4 threads, because of "-parallel" in recon-all +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp = "$1" # grep to extract from TSV +set cond = "$2" +set ap_label = 23_ap_task_b +set grp_label = group_analysis.CSIM.1grp.${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} + +set dir_grpqc = ${dir_ap}/QC +set dir_csim = ${dir_ap}/${grp_label} + +# subject directories +#set sdir_basic = ${dir_basic}/${subj} #/${ses} +#set sdir_epi = ${sdir_basic}/func +#set sdir_anat = ${sdir_basic}/anat +#set sdir_timing = ${sdir_epi} +#set sdir_deob = ${dir_deob}/${subj} #/${ses} +#set sdir_fs = ${dir_fs}/${subj} #/${ses} +#set sdir_suma = ${sdir_fs}/SUMA +#set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +#set sdir_events = ${dir_events}/${subj} #/${ses} +#set sdir_ap = ${dir_ap}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +set mask_name = group_mask.inter.nii.gz +set all_dset_reml = ( ${dir_ap}/sub-*/*.results/stats.sub*REML+tlrc.HEAD ) + +# participants file +set part_file = ${dir_store}/participants.tsv +# all subs in input group +set all_subj = ( `grep --color=never ${grp} ${part_file} \ + | awk '{print $1}'` ) +# all subs to drop +set all_drop = ( `cat ${dir_grpqc}/outliers.c.drop.subs.txt` ) + +echo "++ Found ${#all_subj} in the initial list of subs in grp '${grp}'" +echo "++ The full (multi-group) drop list has ${#all_drop} subj" +echo "++ The full (multi-group) REML dset list has ${#all_dset_reml} files" + +# if there are subjects to drop, include such an option +if ( ${#all_drop} > 0 ) then + set drop_opt = ( -dset_sid_omit_list ${all_drop} ) +else + set drop_opt = ( ) +endif + +# control variables +set label = ${grp}.${cond} +set tt_script = run.tt.${grp}.${cond}.tcsh + +# put this here, because having the '#' creates issues with swarm execution +if ( "${cond}" == "gain" ) then + set beta = "Resp#1_Coef" +else if ( "${cond}" == "loss" ) then + set beta = "Resp#2_Coef" +else + echo "** ERROR: bad variable value for cond: '${cond}'" + exit 1 +endif + + + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# 
check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${dir_csim} + set dir_csim = /lscratch/$SLURM_JOBID #/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +\mkdir -p ${dir_csim} + +# copy a couple of datasets, for convenience +\cp -p ${template} ${dir_csim}/. +3dcopy -overwrite ${dir_grpqc}/${mask_name} ${dir_csim}/ + +cd ${dir_csim} + +# list ALL subject datasets, then specify which to use/drop +gen_group_command.py \ + -command 3dttest++ \ + -write_script ${tt_script} \ + -dsets ${all_dset_reml} \ + -dset_sid_list ${all_subj} \ + ${drop_opt} \ + -subj_prefix sub- \ + -set_labels ${label} \ + -subs_betas "${beta}" \ + -verb 2 \ + -options \ + -mask ${mask_name} \ + -Clustsim \ + |& tee out.ggc + + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +tcsh -x ${tt_script} |& tee out.${tt_script} + + +if ( ${status} ) then + echo "++ FAILED CSIM: ${grp_label}" + set ecode = 1 + goto COPY_AND_EXIT +else + echo "++ FINISHED CSIM: ${grp_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${dir_csim} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${dir_csim}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${dir_csim}/* ${sdir_BW}/. + + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: CSIM (ecode = ${ecode})" +else + echo "++ GOOD FINISH: CSIM" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71a_rba_prep.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71a_rba_prep.tcsh new file mode 100644 index 00000000..2145ac43 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71a_rba_prep.tcsh @@ -0,0 +1,257 @@ +#!/bin/tcsh + +# RBA_PREP: make tables of ROI properties per subj, to be combined for RBA + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
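+#
+# Usage sketch (for illustration; sub-001 is a placeholder ID): args read
+# below are subject ID, group label, and condition name, e.g.:
+#
+#   tcsh do_71a_rba_prep.tcsh sub-001 equalRange gain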
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set subj = $1 +set grp = $2 +set cond = $3 + +set ap_label = 22_ap_task +set grp_label = ${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} +set dir_rba_prep = ${dir_inroot}/data_71_rba_prep/${grp_label} + +# subject directories +set sdir_basic = ${dir_basic}/${subj} #/${ses} +set sdir_epi = ${sdir_basic}/func +set sdir_anat = ${sdir_basic}/anat +set sdir_timing = ${sdir_epi} +set sdir_deob = ${dir_deob}/${subj} #/${ses} +set sdir_fs = ${dir_fs}/${subj} #/${ses} +set sdir_suma = ${sdir_fs}/SUMA +set sdir_ssw = ${dir_ssw}/${subj} #/${ses} +set sdir_events = ${dir_events}/${subj} #/${ses} +set sdir_ap = ${dir_ap}/${subj} #/${ses} +set sdir_rba_prep = ${dir_rba_prep}/${subj} #/${ses} + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz +set atl_glass = ${dir_extra}/MNI_Glasser_HCP_v1.0.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs +set sdir_apres = ${sdir_ap}/${subj}.results + +# control variables + +# put this here, because having the '#' creates issues with swarm execution +if ( "${cond}" == "gain" ) then + set beta = "Resp#1_Coef" +else if ( "${cond}" == "loss" ) then + set beta = "Resp#2_Coef" +else + echo "** ERROR: bad variable value for cond: '${cond}'" + exit 1 +endif + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# try to use /lscratch for speed +if ( -d /lscratch/$SLURM_JOBID ) then + set usetemp = 1 + set sdir_BW = ${sdir_rba_prep} + set sdir_rba_prep = /lscratch/$SLURM_JOBID/${subj} #_${ses} + + # prep for group permission reset + \mkdir -p ${sdir_BW} + set grp_own = `\ls -ld ${sdir_BW} | awk '{print $4}'` +else + set usetemp = 0 +endif +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# 
--------------------------------------------------------------------------- + +\mkdir -p ${sdir_rba_prep} + +cd ${sdir_rba_prep} + +# ============================================================ +echo "++ Extract ROI info for: FS-2000 (Desikan Killiany)" + +set dset_roi = ${sdir_apres}/follow_ROI_e00gmr+tlrc.HEAD # FS D-K atl + +# get GM ROI list, remove non-ROI like ones, and then get the col of names +set dir_abin = `which afni` +set dir_abin = ${dir_abin:h} +set all_gmrois = `grep --color=never tiss__gm \ + ${dir_abin}/afni_fs_aparc+aseg_2000.txt \ + | grep -v "Cerebral-Cortex" \ + | awk '{print $2}'` + +echo "++ Found ${#all_gmrois} ROI labels" + +# prep and clear stats file +set bname = roistats_${subj}_ROI_FS_REN_gmrois +set file_ostats = ${bname}.txt + +printf "%-10s %-35s %10s\n" "subjID" "roi" "eff_est" > ${file_ostats} + +foreach gmroi ( ${all_gmrois} ) + echo "++ proc gmroi: ${gmroi}" + set info = `3dROIstats \ + -quiet \ + -mask ${dset_roi}"<${gmroi}>" \ + "${sdir_apres}/stats.${subj}_REML+tlrc.HEAD[${beta}]"` + + if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT + endif + + printf "%-10s %-35s %10f\n" ${subj} ${gmroi} ${info} >> ${file_ostats} + + # and prep for ttest + set file_1D = ${bname}/beta.${subj}.${gmroi}.1D + \mkdir -p ${bname} + printf "%10s\n" "${info}" > ${file_1D} +end + +# ============================================================ +echo "++ Extract ROI info for: Glasser atlas" + +# for grid reference +set dset_stats = ( ${sdir_apres}/stats.${subj}_REML+tlrc.HEAD ) + +set dset_roi = tmp_MNI_Glasser_${subj}.nii.gz +3dresample -overwrite \ + -input ${atl_glass} \ + -master ${dset_stats} \ + -rmode NN \ + -prefix ${dset_roi} + +if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT +endif + +# reattach labels and cmap +3drefit -copytables ${atl_glass} \ + ${dset_roi} +3drefit -cmap INT_CMAP \ + ${dset_roi} + +# get GM ROI list, remove 'unknown', sort +set lt_temp = tmp_MNI_Glasser_labeltable.niml.lt +3dinfo -labeltable ${dset_roi} > ${lt_temp} +set all_gmrois = `@MakeLabelTable -all_labels -labeltable ${lt_temp} \ + | grep --color=never -v "Unknown" \ + | sort` + +echo "++ Found ${#all_gmrois} ROI labels" + +# prep and clear stats file +set bname = roistats_${subj}_ROI_MNI_Glass_gmrois +set file_ostats = ${bname}.txt + +printf "%-10s %-35s %10s\n" "subjID" "roi" "eff_est" > ${file_ostats} + +foreach gmroi ( ${all_gmrois} ) + echo "++ proc gmroi: ${gmroi}" + set info = `3dROIstats \ + -quiet \ + -mask ${dset_roi}"<${gmroi}>" \ + "${sdir_apres}/stats.${subj}_REML+tlrc.HEAD[${beta}]"` + + if ( ${status} ) then + set ecode = 1 + goto COPY_AND_EXIT + endif + + printf "%-10s %-35s %10f\n" ${subj} ${gmroi} ${info} >> ${file_ostats} + + # and prep for ttest + set file_1D = ${bname}/beta.${subj}.${gmroi}.1D + \mkdir -p ${bname} + printf "%10s\n" "${info}" > ${file_1D} +end + + +if ( ${status} ) then + echo "++ FAILED RBA_PREP: ${ap_label}" + set ecode = 10 + goto COPY_AND_EXIT +else + echo "++ FINISHED RBA_PREP: ${ap_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# copy back from /lscratch to "real" location +if( ${usetemp} && -d ${sdir_rba_prep} ) then + echo "++ Used /lscratch" + echo "++ Copy from: ${sdir_rba_prep}" + echo " to: ${sdir_BW}" + \mkdir -p ${sdir_BW} + \cp -pr ${sdir_rba_prep}/* ${sdir_BW}/. 
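+    # (this brings back the per-atlas roistats_*.txt tables and the
+    # beta.*.1D files made above)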
+ + # reset group permission + chgrp -R ${grp_own} ${sdir_BW} + chmod -R g+w ${sdir_BW} +endif +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: RBA_PREP (ecode = ${ecode})" +else + echo "++ GOOD FINISH: RBA_PREP" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71b_rba_comb.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71b_rba_comb.tcsh new file mode 100644 index 00000000..1ebb672d --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71b_rba_comb.tcsh @@ -0,0 +1,152 @@ +#!/bin/tcsh + +# RBA_COMB: combine individual data into table for RBA + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. + +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni + +# set N_threads for OpenMP +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp = $1 +set cond = $2 + +set ap_label = 22_ap_task +set grp_label = ${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} +set dir_rba_prep = ${dir_inroot}/data_71_rba_prep/${grp_label} + +# subject directories + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz +set atl_glass = ${dir_extra}/MNI_Glasser_HCP_v1.0.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +# control variables + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# don't use lscratch here + +set usetemp = 0 + +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +cd ${dir_rba_prep} + +# ============================================================ +echo "++ Create table for: FS-2000 (Desikan Killiany)" + +set fname_dtable = 
table_all_ROI_FS_REN_gmrois.txt +set all_txt = ( sub*/roistats_*_ROI_FS_REN_gmrois.txt ) + +# start the table using the header from the first file... +cat ${all_txt[1]} > ${fname_dtable} + +# ... and for all others, just dump in data +foreach ii ( `seq 2 1 ${#all_txt}` ) + set txt = ${all_txt[$ii]} + echo "++ proc: ${txt}" + + tail -n+2 ${txt} >> ${fname_dtable} +end + +echo "++ Finished table: ${fname_dtable}" + +# ============================================================ +echo "++ Create table for: Glasser atlas" + +set fname_dtable = table_all_ROI_MNI_Glass_gmrois.txt +set all_txt = ( sub*/roistats_*_ROI_MNI_Glass_gmrois.txt ) + +# start the table using the header from the first file... +cat ${all_txt[1]} > ${fname_dtable} + +# ... and for all others, just dump in data +foreach ii ( `seq 2 1 ${#all_txt}` ) + set txt = ${all_txt[$ii]} + echo "++ proc: ${txt}" + + tail -n+2 ${txt} >> ${fname_dtable} +end + +echo "++ Finished table: ${fname_dtable}" + +# ----------------- + +if ( ${status} ) then + echo "++ FAILED RBA_COMB: ${ap_label}" + set ecode = 10 + goto COPY_AND_EXIT +else + echo "++ FINISHED RBA_COMB: ${ap_label}" +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# not using lscratch here +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: RBA_COMB (ecode = ${ecode})" +else + echo "++ GOOD FINISH: RBA_COMB" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71c_rba.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71c_rba.tcsh new file mode 100644 index 00000000..d0bb4c37 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/do_71c_rba.tcsh @@ -0,0 +1,123 @@ +#!/bin/tcsh + +# RBA: run RBA on the data tables that are present + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, via the corresponding run_*tcsh +# + NO session level here. 
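+#
+# Usage sketch (for illustration; the output prefix is a placeholder):
+# args read below are group, condition, a data table made by
+# do_71b_rba_comb.tcsh, and an output prefix for RBA, e.g.:
+#
+#   tcsh do_71c_rba.tcsh equalRange gain \
+#       table_all_ROI_FS_REN_gmrois.txt RBA_FS_equalRange.gain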
+ +# ----------------------------- biowulf-cmd --------------------------------- +# load modules +source /etc/profile.d/modules.csh +module load afni R + +# set N_threads for OpenMP +setenv OMP_NUM_THREADS $SLURM_CPUS_PER_TASK + +# compress BRIK files +setenv AFNI_COMPRESSOR GZIP + +# initial exit code; we don't exit at fail, to copy partial results back +set ecode = 0 +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# top level definitions (constant across demo) +# --------------------------------------------------------------------------- + +# labels +set grp = $1 +set cond = $2 +set fname_dtable = $3 +set prefix = $4 + +set ap_label = 22_ap_task +set grp_label = ${grp}.${cond} + +# upper directories +set dir_inroot = ${PWD:h} # one dir above scripts/ +set dir_log = ${dir_inroot}/logs +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic +set dir_deob = ${dir_inroot}/data_02_deob +set dir_fs = ${dir_inroot}/data_12_fs +set dir_ssw = ${dir_inroot}/data_13_ssw +set dir_events = ${dir_inroot}/data_15_events +set dir_ap = ${dir_inroot}/data_${ap_label} +set dir_rba_prep = ${dir_inroot}/data_71_rba_prep/${grp_label} + +# subject directories + +# extra datasets +set dir_extra = ${dir_inroot}/extra_dsets +set template = ${dir_extra}/MNI152_2009_template_SSW.nii.gz +set grid_template = ${dir_extra}/T1.grid_template.nii.gz +set atl_glass = ${dir_extra}/MNI_Glasser_HCP_v1.0.nii.gz + +# -------------------------------------------------------------------------- +# data and control variables +# -------------------------------------------------------------------------- + +# dataset inputs + +# control variables + + +# one way of many ways to set to available number of CPUs: +# afni_check_omp respects ${OMP_NUM_THREADS} +set njobs = `afni_check_omp` + +# check available N_threads and report what is being used +set nthr_avail = `afni_system_check.py -disp_num_cpu` +set nthr_using = `afni_check_omp` + +echo "++ INFO: Using ${nthr_avail} of available ${nthr_using} threads" + +# ----------------------------- biowulf-cmd -------------------------------- +# don't use lscratch here + +set usetemp = 0 + +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# run programs +# --------------------------------------------------------------------------- + +cd ${dir_rba_prep} + +# ============================================================ + +RBA \ + -prefix ${prefix} \ + -chains 4 \ + -iterations 1000 \ + -mean 'eff_est~1+(1|roi)+(1|subjID)' \ + -sigma '1+(1|roi)+(1|subjID)' \ + -ROI 'roi' \ + -EOI 'Intercept' \ + -dataTable ${fname_dtable} + +if ( ${status} ) then + set ecode = 3 + goto COPY_AND_EXIT +endif + +# --------------------------------------------------------------------------- + +COPY_AND_EXIT: + +# ----------------------------- biowulf-cmd -------------------------------- +# not using lscratch here +# --------------------------------------------------------------------------- + +if ( ${ecode} ) then + echo "++ BAD FINISH: RBA_COMB (ecode = ${ecode})" +else + echo "++ GOOD FINISH: RBA_COMB" +endif + +exit ${ecode} + diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_01_gtkyd.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_01_gtkyd.tcsh new file mode 100644 index 00000000..f82b0478 --- 
/dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_01_gtkyd.tcsh
@@ -0,0 +1,138 @@
+#!/bin/tcsh
+
+# GTKYD: Getting To Know Your Data
+#    -> preliminary info and QC
+#    -> does not use lscratch
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 01_gtkyd
+
+# upper directories
+set dir_scr = $PWD
+
+# define this with abs path here
+cd ..
+set dir_inroot = $PWD
+cd -
+
+set dir_log   = ${dir_inroot}/logs
+set dir_swarm = ${dir_inroot}/swarms
+set dir_store = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic = ${dir_store}    #${dir_inroot}/data_00_basic
+set dir_gtkyd = ${dir_inroot}/data_${cmd}
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# ** make the output directory here **
+\mkdir -p ${dir_gtkyd}
+
+# get list of all subj IDs for proc, per group
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# make lists of all dsets per modality
+set file_all_anat = ${dir_gtkyd}/list_all_anat.txt
+set file_all_epi  = ${dir_gtkyd}/list_all_epi.txt
+
+printf "" > ${file_all_anat}
+printf "" > ${file_all_epi}
+
+# count the number of dsets per modality
+set file_num_anat = ${dir_gtkyd}/list_num_anat.txt
+set file_num_epi  = ${dir_gtkyd}/list_num_epi.txt
+
+printf "" > ${file_num_anat}
+printf "" > ${file_num_epi}
+
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+    # mirror dirs/globbing in later scripts; note that this format
+    # assumes there is no ses dir, which is true here
+    set sdir_anat = ( ${dir_basic}/${subj}/anat )
+    set sdir_epi  = ( ${dir_basic}/${subj}/func )
+
+    set dset_anat_00 = ( ${sdir_anat}/${subj}*T1w.nii.gz )       #${ses}
+    set dsets_epi    = ( ${sdir_epi}/${subj}_*task*bold.nii* )   #${ses}
+
+    # write out dset lists
+
+    foreach dset ( ${dset_anat_00} )
+        echo ${dset} >> ${file_all_anat}
+    end
+
+    foreach dset ( ${dsets_epi} )
+        echo ${dset} >> ${file_all_epi}
+    end
+
+    # write out dset counts
+
+    echo ${#dset_anat_00} >> ${file_num_anat}
+    echo ${#dsets_epi}    >> ${file_num_epi}
+end
+
+# set up log and run
+set log = ${cdir_log}/log_${cmd}.txt
+
+echo "tcsh -x do_${cmd}.tcsh ${file_all_epi} \\"   >> ${scr_swarm}
+echo "     ${file_all_anat} |& tee ${log}"         >> ${scr_swarm}
+
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+# (not using lscratch here, so no '--gres' option)
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm,quick \
+    --threads-per-process=2 \
+    --gb-per-process=3 \
+    --time=03:59:00 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_02_deob.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_02_deob.tcsh
new file mode 100644
index 00000000..0258cdee
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_02_deob.tcsh
@@ -0,0 +1,95 @@
+#!/bin/tcsh
+
+# DEOB: deoblique the anatomical and centralize grid
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 02_deob
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm,quick \
+    --threads-per-process=4 \
+    --gb-per-process=2 \
+    --time=00:29:00 \
+    --gres=lscratch:1 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_12_fs.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_12_fs.tcsh
new file mode 100644
index 00000000..04d722d7
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_12_fs.tcsh
@@ -0,0 +1,95 @@
+#!/bin/tcsh
+
+# FS: run FreeSurfer's recon-all and AFNI's @SUMA_Make_Spec_FS.
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 12_fs
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
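+# (NB: dir_inroot is a relative path in these run_* scripts; that works
+# because, per the NOTES above, they are always executed from within the
+# scripts/ dir)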
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=4 \
+    --gb-per-process=10 \
+    --time=12:00:00 \
+    --gres=lscratch:10 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_13_ssw.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_13_ssw.tcsh
new file mode 100644
index 00000000..ea3fb8ca
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_13_ssw.tcsh
@@ -0,0 +1,95 @@
+#!/bin/tcsh
+
+# SSW: run @SSwarper to skullstrip (SS) and estimate a nonlinear warp.
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 13_ssw
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm,quick \
+    --threads-per-process=16 \
+    --gb-per-process=10 \
+    --time=03:59:00 \
+    --gres=lscratch:3 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_15_events.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_15_events.tcsh
new file mode 100644
index 00000000..561e0c8f
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_15_events.tcsh
@@ -0,0 +1,96 @@
+#!/bin/tcsh
+
+# EVENTS: create stim events files
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 15_events
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm,quick \
+    --threads-per-process=1 \
+    --bundle=10 \
+    --gb-per-process=1 \
+    --time=00:02:00 \
+    --gres=lscratch:1 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_22_ap_task.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_22_ap_task.tcsh
new file mode 100644
index 00000000..0be94a2c
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_22_ap_task.tcsh
@@ -0,0 +1,96 @@
+#!/bin/tcsh
+
+# AP_TASK: full task-based processing (voxelwise)
+#    with local EPI unifize
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 22_ap_task
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=16 \
+    --gb-per-process=30 \
+    --time=30:59:00 \
+    --gres=lscratch:100 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_23_ap_task_b.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_23_ap_task_b.tcsh
new file mode 100644
index 00000000..9f75c6ce
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_23_ap_task_b.tcsh
@@ -0,0 +1,96 @@
+#!/bin/tcsh
+
+# AP_TASK: full task-based processing (voxelwise), with blurring
+#    with local EPI unifize
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 23_ap_task_b
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+
+# get list of all subj IDs for proc
+cd ${dir_basic}
+set all_subj = ( sub-* )
+cd -
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# -------------------------------------------------------------------------
+# build swarm command
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=16 \
+    --gb-per-process=30 \
+    --time=30:59:00 \
+    --gres=lscratch:100 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_52_ap_qc.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_52_ap_qc.tcsh
new file mode 100644
index 00000000..fb10cd64
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_52_ap_qc.tcsh
@@ -0,0 +1,114 @@
+#!/bin/tcsh
+
+# QC: GSSRT QC, inclusion/exclusion criteria for subj
+# ** this is a group level command, doesn't loop over subjs
+# ** at present, it is so quick that it doesn't swarm, either---just run
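+#    (GSSRT = gen_ss_review_table.py, which the do_* script runs to
+#    apply the inclusion/exclusion criteria)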
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 52_ap_qc
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+# simply run, don't swarm
+
+
+set log = ${cdir_log}/log_${cmd}_grp.txt
+
+echo "++ Simply run this relatively short command here"
+
+tcsh -xf ${scr_cmd} \
+    |& tee ${log}
+
+
+exit 0
+
+
+# ------------------------------------------------------------------------
+# (below: unreached swarm-template code, kept for reference)
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=16 \
+    --gb-per-process=30 \
+    --time=30:59:00 \
+    --gres=lscratch:100 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_53_ap_qc.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_53_ap_qc.tcsh
new file mode 100644
index 00000000..8188d4d1
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_53_ap_qc.tcsh
@@ -0,0 +1,115 @@
+#!/bin/tcsh
+
+# QC: GSSRT QC, inclusion/exclusion criteria for subj
+# ** this is a group level command, doesn't loop over subjs
+# ** at present, it is so quick that it doesn't swarm, either---just run
+#    -> for the 23_ap proc
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 53_ap_qc
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+# simply run, don't swarm
+
+
+set log = ${cdir_log}/log_${cmd}_grp.txt
+
+echo "++ Simply run this relatively short command here"
+
+tcsh -xf ${scr_cmd} \
+    |& tee ${log}
+
+
+exit 0
+
+
+# ------------------------------------------------------------------------
+# (below: unreached swarm-template code, kept for reference)
+
+cat <<EOF
+
+++ Found ${#all_subj} subj to process
+
+EOF
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text
+        echo "tcsh -xf ${scr_cmd} ${subj} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"               >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=16 \
+    --gb-per-process=30 \
+    --time=30:59:00 \
+    --gres=lscratch:100 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_61_etac_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_61_etac_1grp.tcsh
new file mode 100644
index 00000000..aa3bfe85
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_61_etac_1grp.tcsh
@@ -0,0 +1,77 @@
+#!/bin/tcsh
+
+# ETAC: 3dttest with ETAC opts for a single group
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute
+set cmd = 61_etac_1grp
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+# simply run, don't swarm
+
+set all_grp  = ( equalRange equalIndif )
+set all_cond = ( gain loss)
+
+foreach grp ( ${all_grp} )
+    foreach cond ( ${all_cond} )
+        echo "++ Prepare cmd for: ${grp} ${cond}"
+
+        set log = ${cdir_log}/log_${cmd}_${grp}_${cond}.txt
+
+        # run command script (verbosely, and don't use '-e'); log terminal text.
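+        # (the '\\' writes a literal line-continuation into the swarm
+        # file, so the two echoed lines form a single command there)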
+ echo "tcsh -xf ${scr_cmd} ${grp} ${cond} \\" >> ${scr_swarm} + echo " |& tee ${log}" >> ${scr_swarm} + end +end + +# ------------------------------------------------------------------------- +# run swarm command +cd ${dir_scr} + +echo "++ And start swarming: ${scr_swarm}" + +swarm \ + -f ${scr_swarm} \ + --partition=norm,quick \ + --threads-per-process=16 \ + --gb-per-process=30 \ + --time=3:59:00 \ + --gres=lscratch:50 \ + --logdir=${cdir_log} \ + --job-name=job_${cmd} \ + --merge-output \ + --usecsh diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_62_etac_2grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_62_etac_2grp.tcsh new file mode 100644 index 00000000..d2b9a944 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_62_etac_2grp.tcsh @@ -0,0 +1,76 @@ +#!/bin/tcsh + +# ETAC: 3dttest with ETAC opts for a group contrast + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh +# NO session level here + +# To execute: +# tcsh RUN_SCRIPT_NAME + +# -------------------------------------------------------------------------- + +# specify script to execute +set cmd = 62_etac_2grp + +# upper directories +set dir_scr = $PWD +set dir_inroot = .. +set dir_log = ${dir_inroot}/logs +set dir_swarm = ${dir_inroot}/swarms +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic + + +# running +set cdir_log = ${dir_log}/logs_${cmd} +set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt +set scr_cmd = ${dir_scr}/do_${cmd}.tcsh + +# -------------------------------------------------------------------------- + +\mkdir -p ${cdir_log} +\mkdir -p ${dir_swarm} + +# clear away older swarm script +if ( -e ${scr_swarm} ) then + \rm ${scr_swarm} +endif + +# -------------------------------------------------------------------------- +# simply run, don't swarm + +set grp1 = equalRange +set grp2 = equalIndif +set all_cond = ( loss ) + +foreach cond ( ${all_cond} ) + echo "++ Prepare cmd for: ${grp1}-${grp2} ${cond}" + + set log = ${cdir_log}/log_${cmd}_${grp1}-${grp2}_${cond}.txt + + # run command script (verbosely, and don't use '-e'); log terminal text. 
+ echo "tcsh -xf ${scr_cmd} ${grp1} ${grp2} ${cond} \\" >> ${scr_swarm} + echo " |& tee ${log}" >> ${scr_swarm} +end + +# ------------------------------------------------------------------------- +# run swarm command +cd ${dir_scr} + +echo "++ And start swarming: ${scr_swarm}" + +swarm \ + -f ${scr_swarm} \ + --partition=norm,quick \ + --threads-per-process=16 \ + --gb-per-process=30 \ + --time=3:59:00 \ + --gres=lscratch:30 \ + --logdir=${cdir_log} \ + --job-name=job_${cmd} \ + --merge-output \ + --usecsh diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_63_ttest_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_63_ttest_1grp.tcsh new file mode 100644 index 00000000..d0d8cc68 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_63_ttest_1grp.tcsh @@ -0,0 +1,78 @@ +#!/bin/tcsh + +# ETAC: 3dttest with ETAC opts for a single group +# -> for 23_ap + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh +# NO session level here + +# To execute: +# tcsh RUN_SCRIPT_NAME + +# -------------------------------------------------------------------------- + +# specify script to execute +set cmd = 63_ttest_1grp + +# upper directories +set dir_scr = $PWD +set dir_inroot = .. +set dir_log = ${dir_inroot}/logs +set dir_swarm = ${dir_inroot}/swarms +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic + + +# running +set cdir_log = ${dir_log}/logs_${cmd} +set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt +set scr_cmd = ${dir_scr}/do_${cmd}.tcsh + +# -------------------------------------------------------------------------- + +\mkdir -p ${cdir_log} +\mkdir -p ${dir_swarm} + +# clear away older swarm script +if ( -e ${scr_swarm} ) then + \rm ${scr_swarm} +endif + +# -------------------------------------------------------------------------- +# simply run, don't swarm + +set all_grp = ( equalRange equalIndif ) +set all_cond = ( gain loss) + +foreach grp ( ${all_grp} ) + foreach cond ( ${all_cond} ) + echo "++ Prepare cmd for: ${grp} ${cond}" + + set log = ${cdir_log}/log_${cmd}_${grp}_${cond}.txt + + # run command script (verbosely, and don't use '-e'); log terminal text. 
+ echo "tcsh -xf ${scr_cmd} ${grp} ${cond} \\" >> ${scr_swarm} + echo " |& tee ${log}" >> ${scr_swarm} + end +end + +# ------------------------------------------------------------------------- +# run swarm command +cd ${dir_scr} + +echo "++ And start swarming: ${scr_swarm}" + +swarm \ + -f ${scr_swarm} \ + --partition=norm,quick \ + --threads-per-process=16 \ + --gb-per-process=30 \ + --time=00:59:00 \ + --gres=lscratch:10 \ + --logdir=${cdir_log} \ + --job-name=job_${cmd} \ + --merge-output \ + --usecsh diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_64_csim_1grp.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_64_csim_1grp.tcsh new file mode 100644 index 00000000..a3edb4fb --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_64_csim_1grp.tcsh @@ -0,0 +1,77 @@ +#!/bin/tcsh + +# CSIM: 3dttest with CSIM opts for a single group + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh +# NO session level here + +# To execute: +# tcsh RUN_SCRIPT_NAME + +# -------------------------------------------------------------------------- + +# specify script to execute +set cmd = 64_csim_1grp + +# upper directories +set dir_scr = $PWD +set dir_inroot = .. +set dir_log = ${dir_inroot}/logs +set dir_swarm = ${dir_inroot}/swarms +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic + + +# running +set cdir_log = ${dir_log}/logs_${cmd} +set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt +set scr_cmd = ${dir_scr}/do_${cmd}.tcsh + +# -------------------------------------------------------------------------- + +\mkdir -p ${cdir_log} +\mkdir -p ${dir_swarm} + +# clear away older swarm script +if ( -e ${scr_swarm} ) then + \rm ${scr_swarm} +endif + +# -------------------------------------------------------------------------- +# simply run, don't swarm + +set all_grp = ( equalRange equalIndif ) +set all_cond = ( gain loss) + +foreach grp ( ${all_grp} ) + foreach cond ( ${all_cond} ) + echo "++ Prepare cmd for: ${grp} ${cond}" + + set log = ${cdir_log}/log_${cmd}_${grp}_${cond}.txt + + # run command script (verbosely, and don't use '-e'); log terminal text. 
+ echo "tcsh -xf ${scr_cmd} ${grp} ${cond} \\" >> ${scr_swarm} + echo " |& tee ${log}" >> ${scr_swarm} + end +end + +# ------------------------------------------------------------------------- +# run swarm command +cd ${dir_scr} + +echo "++ And start swarming: ${scr_swarm}" + +swarm \ + -f ${scr_swarm} \ + --partition=norm,quick \ + --threads-per-process=16 \ + --gb-per-process=30 \ + --time=3:59:00 \ + --gres=lscratch:30 \ + --logdir=${cdir_log} \ + --job-name=job_${cmd} \ + --merge-output \ + --usecsh diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71a_rba_prep.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71a_rba_prep.tcsh new file mode 100644 index 00000000..2a5acb4a --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71a_rba_prep.tcsh @@ -0,0 +1,101 @@ +#!/bin/tcsh + +# RBA_PREP: make some ROI tables +# this runs for a particular *group* and *cond* + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh +# NO session level here + +# To execute: +# tcsh RUN_SCRIPT_NAME + +# -------------------------------------------------------------------------- + +# specify script to execute---and which AP results to use +set cmd = 71a_rba_prep +set ap_label = 22_ap_task + +# upper directories +set dir_scr = $PWD +set dir_inroot = .. +set dir_log = ${dir_inroot}/logs +set dir_swarm = ${dir_inroot}/swarms +set dir_store = /data/SSCC_NARPS/globus_sync/ds001205 # data on biowulf +set dir_basic = ${dir_store} #${dir_inroot}/data_00_basic + +set dir_ap = ${dir_inroot}/data_${ap_label} +set dir_grpqc = ${dir_ap}/QC + +# running +set cdir_log = ${dir_log}/logs_${cmd} +set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt +set scr_cmd = ${dir_scr}/do_${cmd}.tcsh + +# -------------------------------------------------------------------------- + +\mkdir -p ${cdir_log} +\mkdir -p ${dir_swarm} + +# clear away older swarm script +if ( -e ${scr_swarm} ) then + \rm ${scr_swarm} +endif + +# -------------------------------------------------------------------------- +# get list of all subj IDs for proc **FOR** a particular group, and +# **AFTER** dropping some in QC + +set grp = equalRange +set cond = gain +set all_subj = `cat ${dir_grpqc}/list_grp_${grp}_final.txt` + +cat <> ${scr_swarm} + echo " |& tee ${log}" >> ${scr_swarm} +# end +end + +# ------------------------------------------------------------------------- +# run swarm command +cd ${dir_scr} + +echo "++ And start swarming: ${scr_swarm}" + +# don't need to use scratch disk here, just text files +swarm \ + -f ${scr_swarm} \ + --partition=norm \ + --threads-per-process=1 \ + --gb-per-process=3 \ + --time=00:20:00 \ + #--gres=lscratch:100 \ + --logdir=${cdir_log} \ + --job-name=job_${cmd} \ + --merge-output \ + --usecsh diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71b_rba_comb.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71b_rba_comb.tcsh new file mode 100644 index 00000000..fc5df51c --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71b_rba_comb.tcsh @@ -0,0 +1,111 @@ +#!/bin/tcsh + +# RBA_COMB: combine individual data into table for RBA +# this runs for a particular *group* and *cond* + +# NOTES +# +# + This is a Biowulf script (has slurm stuff) +# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh +# NO session level here + +# To execute: +# tcsh RUN_SCRIPT_NAME + +# -------------------------------------------------------------------------- + +# specify 
+set cmd      = 71b_rba_comb
+set ap_label = 22_ap_task
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+set dir_ap    = ${dir_inroot}/data_${ap_label}
+set dir_grpqc = ${dir_ap}/QC
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+# simply run, don't swarm
+
+set grp  = equalRange
+set cond = gain
+
+set log = ${cdir_log}/log_${cmd}_grp_${grp}.${cond}.txt
+
+
+tcsh -xf ${scr_cmd} ${grp} ${cond} \
+    |& tee ${log}
+
+
+exit 0
+
+
+# -------------------------------------------------------------------------
+# build swarm command
+# (below: unreached swarm-template code, kept for reference)
+
+# loop over all subj
+foreach subj ( ${all_subj} )
+    echo "++ Prepare cmd for: ${subj}"
+
+#    # loop over all ses
+#    cd ${dir_basic}/${subj}
+#    set all_ses = ( ses-* )
+#    cd -
+
+#    foreach ses ( ${all_ses} )
+        set log = ${cdir_log}/log_${cmd}_${subj}.txt   #_${ses}
+
+        # run command script (verbosely, and don't use '-e'); log terminal text.
+        # no ${ses} var
+        echo "tcsh -xf ${scr_cmd} ${subj} ${grp} ${cond} \\"   >> ${scr_swarm}
+        echo "     |& tee ${log}"                              >> ${scr_swarm}
+#    end
+end
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+# don't need to use scratch disk here, just text files
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=1 \
+    --gb-per-process=3 \
+    --time=00:10:00 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71c_rba.tcsh b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71c_rba.tcsh
new file mode 100644
index 00000000..0e527585
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_biowulf/run_71c_rba.tcsh
@@ -0,0 +1,87 @@
+#!/bin/tcsh
+
+# RBA: run RBA on datatable
+#    this runs for a particular *group* and *cond*
+
+# NOTES
+#
+# + This is a Biowulf script (has slurm stuff)
+# + Run this script in the scripts/ dir, to execute the corresponding do_*tcsh
+# NO session level here
+
+# To execute:
+#     tcsh RUN_SCRIPT_NAME
+
+# --------------------------------------------------------------------------
+
+# specify script to execute---and which AP results to use
+set cmd      = 71c_rba
+set ap_label = 22_ap_task
+
+# upper directories
+set dir_scr    = $PWD
+set dir_inroot = ..
+set dir_log    = ${dir_inroot}/logs
+set dir_swarm  = ${dir_inroot}/swarms
+set dir_store  = /data/SSCC_NARPS/globus_sync/ds001205   # data on biowulf
+set dir_basic  = ${dir_store}    #${dir_inroot}/data_00_basic
+
+set dir_ap    = ${dir_inroot}/data_${ap_label}
+set dir_grpqc = ${dir_ap}/QC
+
+# running
+set cdir_log  = ${dir_log}/logs_${cmd}
+set scr_swarm = ${dir_swarm}/swarm_${cmd}.txt
+set scr_cmd   = ${dir_scr}/do_${cmd}.tcsh
+
+# --------------------------------------------------------------------------
+
+\mkdir -p ${cdir_log}
+\mkdir -p ${dir_swarm}
+
+# clear away older swarm script
+if ( -e ${scr_swarm} ) then
+    \rm ${scr_swarm}
+endif
+
+# --------------------------------------------------------------------------
+# simply run, don't swarm
+
+set grp  = equalRange
+set cond = gain
+
+cd ${dir_inroot}/data_71_rba_prep/${grp}.${cond}
+set bpath      = $PWD
+set all_dtable = ( table*txt )
+cd -
+
+foreach dtable ( ${all_dtable} )
+    set bname = ${dtable:t:r}
+    set opref = fit_rba.${bname}.dat
+
+    set log = ${cdir_log}/log_${cmd}_grp_${grp}.${cond}.${bname}.txt
+
+    echo "tcsh -xf ${scr_cmd} ${grp} ${cond} \\"   >> ${scr_swarm}
+    echo "     ${bpath}/${dtable} ${opref} \\"     >> ${scr_swarm}
+    echo "     |& tee ${log}"                      >> ${scr_swarm}
+end
+
+
+# -------------------------------------------------------------------------
+# run swarm command
+cd ${dir_scr}
+
+echo "++ And start swarming: ${scr_swarm}"
+
+# don't need to use scratch disk here, just text files
+swarm \
+    -f ${scr_swarm} \
+    --partition=norm \
+    --threads-per-process=8 \
+    --gb-per-process=3 \
+    --time=10:59:00 \
+    --logdir=${cdir_log} \
+    --job-name=job_${cmd} \
+    --merge-output \
+    --usecsh
diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/README.txt b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/README.txt
new file mode 100644
index 00000000..3d752105
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/README.txt
@@ -0,0 +1,54 @@
+Some supplementary scripts for combining NARPS Teams' results and
+generating images.
+
+This assumes all the NARPS Teams' results have been downloaded,
+unpacked and cleaned up *a lot* for names/formatting: dirs renamed to
+have names like NARPS-????; all MNI-standard space NIFTIs given
+[qs]form_code = 4; non-ASCII characters removed from file/dir names;
+and files given straightforward/unambiguous/nonoverlapping names.
+
+Most scripts loop over each Hypothesis.
+
+---------------------------------------------------------------------------
+
++ do_00_qsform_codes.tcsh
+
+  A preliminary script used to fix [sq]form_code values, as necessary;
+  this one was copied into a Team's directory of data to use.
+
++ do_01a_proc_sign_flip.tcsh
+
+  Apply rules for which datasets needed to have signs flipped,
+  according to their submitted information about what a "positive"
+  stat value meant.
+
++ do_01b_resam_same_grid.tcsh
+
+  Resample datasets to a 2x2x2 mm**3 grid, for visualization and
+  correlation purposes.
+
++ do_02_pc_with_sign0.tcsh
+
+  Do PCA across teams' results, with the first PC used to order
+  datasets for a given hypothesis, for visualization purposes.
+
++ do_03_sort_by_sim2pc.tcsh
+
+  Apply ordering based on similarity to the first PC.
+
++ do_04_make_imgs_?.tcsh
+
+  Make the montage of images of a hypothesis across all Teams (for
+  both transparent and opaque thresholding).
+
++ do_10_within_brain_masks.tcsh
+
+  Make masks of several varieties: where data is nonzero; where data
+  is >thr; where data is <-thr; where data is > |thr|. To be used in
+  the next step (Dice calcs, in particular).
+
++ do_11_dice_pearson_coeffs.tcsh
+
+  Do Dice and Pearson calcs, and generate similarity matrices (both
+  for whole-brain and zoomed-in results).
diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_00_qsform_codes.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_00_qsform_codes.tcsh
new file mode 100644
index 00000000..67551151
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_00_qsform_codes.tcsh
@@ -0,0 +1,23 @@
+#!/bin/tcsh
+
+# correct qform_code and sform_code for these dsets to be 4 (=MNI)
+
+# to use, copy it into the directory holding the data
+
+echo "++ gunzip all dsets"
+gunzip *.nii.gz
+
+set all_nii = ( *nii )
+
+foreach nii ( ${all_nii} )
+    echo "++ Fixing [qs]form_code for dset: ${nii}"
+    nifti_tool \
+        -mod_hdr -mod_field qform_code 4 \
+        -mod_field sform_code 4 \
+        -infiles ${nii} \
+        -overwrite
+end
+
+
+echo "++ gzip all dsets"
+gzip *nii
diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01a_proc_sign_flip.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01a_proc_sign_flip.tcsh
new file mode 100644
index 00000000..0de83e8f
--- /dev/null
+++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01a_proc_sign_flip.tcsh
@@ -0,0 +1,47 @@
+#!/bin/tcsh
+
+# flip signs in the subsets of hyps where signflips were needed
+
+# each dset that is flipped is for a single hypothesis here
+
+# comment out the next two lines if this needs to be run again:
+echo "** should not be run multiple times **"
+exit 0
+
+
+
+set all_num = ( 5 6 9 )
+
+
+foreach num ( ${all_num} )
+    echo "------------------- hyp = ${num} ---------------------------------"
+
+    # flip signs for teams that have a "-1" in these text files
+    # (excluding teams that were excluded from unthresholded maps)
+    set all_teamid = `cat sign_info_hyp${num}.txt    | \
+                        grep --color=never "\-1"     | \
+                        grep --color=never -v "X1Z4" | \
+                        grep --color=never -v "1K0E" | \
+                        grep --color=never -v "16IN" | \
+                        awk '{print $1}'`
+
+    foreach teamid ( ${all_teamid} )
+        echo "++ ${teamid}"
+        set dir_team  = `find ./ -maxdepth 1 -type d -name "NARP*${teamid}" \
+                            | cut -b3-`
+        set orig_dset = `\ls ${dir_team}/*hyp*${num}*unthr*.nii*`
+        echo ${orig_dset}
+
+        set dir_flip = ${dir_team}/store_unflip_HYP-${num}
+        echo ${dir_flip}
+        \mkdir -p ${dir_flip}
+        \mv ${orig_dset} ${dir_flip}/.
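+        # (the original file now exists only in the store_unflip dir; a
+        # sign-flipped copy is created under the original name below)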
+ set unflip_dset = `\ls ${dir_flip}/*hyp*${num}*unthr*.nii*` + + 3dcalc \ + -a ${unflip_dset} \ + -expr '-1*a' \ + -prefix ${orig_dset} + end + +end diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01b_resam_same_grid.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01b_resam_same_grid.tcsh new file mode 100644 index 00000000..bac45734 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_01b_resam_same_grid.tcsh @@ -0,0 +1,47 @@ +#!/bin/tcsh + +set dset_grid = ~/REF_TEMPLATES/MNI152_mask_222.nii.gz + +set all_num = `seq 1 1 9` + +foreach num ( ${all_num} ) + set all_dset = `ls -1 NARP*/*hyp*${num}*unthr*.nii*` + + set odir = res222_narps_hyp${num} + \mkdir -p ${odir} + + foreach nn ( `seq 1 1 ${#all_dset}` ) + set nnn = `printf "%03d" $nn` + set ff = "${all_dset[$nn]}" + + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + set dset_res = "${odir}/dset_${iid}_${idir}__${ibase}.nii.gz" + + echo "++ dset nam: '${ff}'" + echo "++ dset_res: '${dset_res}'" + 3dresample -echo_edu \ + -overwrite \ + -prefix "${dset_res}" \ + -master "${dset_grid}" \ + -input "${ff}" + + if ( $status ) then + echo "** ERROR: crash here" + exit 1 + endif + end + + # concatenate everything + cd ${odir} + 3dTcat -prefix DSET_ALL_hyp${num}_222.nii.gz dset_* + cd - +end + + + + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_02_pc_with_sign0.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_02_pc_with_sign0.tcsh new file mode 100644 index 00000000..4ff2a8b4 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_02_pc_with_sign0.tcsh @@ -0,0 +1,62 @@ +#!/bin/tcsh + + +set dset_mask = ~/REF_TEMPLATES/MNI152_mask_222.nii.gz + +set all_num = `seq 1 1 9` + +foreach num ( ${all_num} ) + echo "++++++++++++++++++++++++ num: ${num} ++++++++++++++++++++++++++++" + + set dir_hyp = res222_narps_hyp${num} + set grp_dset = ( ${dir_hyp}/DSET*nii* ) + + 3dpc \ + -overwrite \ + -mask ${dset_mask} \ + -pcsave 5 \ + -prefix ${dir_hyp}/PC_hyp${num} \ + ${grp_dset} + + # ============================================================== + # get ref dset to check sign of [0]th PC + + set ref_dset = `ls -1 BLARG_TEST_DSET*/*hyp*${num}*unthr*.nii*` + if ( "${#ref_dset}" != "1" ) then + echo "** ERROR: found too many ref dsets, or not enough: ${#ref_dset}" + exit + else + 3dresample \ + -overwrite \ + -prefix tmp_REF_DSET.nii.gz \ + -master ${dset_mask} \ + -input "${ref_dset}" + endif + + # which is inset and which is refset matters here, solely because + # of the file we parse later + 3dMatch -echo_edu \ + -overwrite \ + -mask ${dset_mask} \ + -refset ${dir_hyp}/PC_hyp${num}+tlrc \ + -inset tmp_REF_DSET.nii.gz \ + -prefix tmp_MATCHED + + if ( $status ) then + echo "** ERROR: crash here" + exit 1 + endif + + # get the corr value, and see if it is pos or neg + set vals = `cat tmp_MATCHED_REF*.vals` + set vvv = "${vals[3]}" + set signum = `echo "(-1)^(1+ (${vvv} > 0))" | bc` + echo "++ signum is: '${signum}'" + # ... 
and flip the sign of the PC dset, if necessary + 3dcalc -echo_edu \ + -overwrite \ + -a ${dir_hyp}/PC_hyp${num}+tlrc \ + -expr "${signum}*a" \ + -prefix ${dir_hyp}/PC_hyp${num}_sign0.nii.gz + +end diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_03_sort_by_sim2pc.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_03_sort_by_sim2pc.tcsh new file mode 100644 index 00000000..87f2ab1e --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_03_sort_by_sim2pc.tcsh @@ -0,0 +1,64 @@ +#!/bin/tcsh + +set dset_grid = ~/REF_TEMPLATES/MNI152_mask_222.nii.gz + +set all_num = `seq 1 1 9` + +foreach num ( ${all_num} ) + + set ref_dset = `ls -1 res222_narps_hyp${num}/PC_hyp${num}_sign0.nii.gz` + + if ( "${#ref_dset}" != "1" ) then + echo "** ERROR: found too many ref dsets, or not enough: ${#ref_dset}" + exit + endif + + # now we make (sorted) lists of both the original dsets... + set ofile = "list_match_${num}.txt" + set ofile2 = "list_match_${num}_sort.txt" + printf "" > ${ofile} + # ... and the resampled ones + set rfile = "list_match_${num}_RES.txt" + set rfile2 = "list_match_${num}_RES_sort.txt" + printf "" > ${rfile} + + set all_dset = ( res222_narps_hyp${num}/dset* ) + + foreach nn ( `seq 1 1 ${#all_dset}` ) + set nnn = `printf "%03d" $nn` + set ff = "${all_dset[$nn]}" + # base name of vol, from which we extract 4char ID + set ibase = `3dinfo -prefix_noext "${ff}"` + set iid = `printf "${ibase}" | head -c 9 | tail -c 4` + + 3dMatch -echo_edu \ + -overwrite \ + -inset "${ff}" \ + -refset ${ref_dset} \ + -mask ${dset_grid} \ + -prefix tmp_MATCHED + + if ( $status ) then + echo "** ERROR: crash here" + exit 1 + endif + + set vals = `cat tmp_MATCHED_REF*.vals` + set vvv = "${vals[3]}" + + set orig_dset = `\ls -1 NARPS-${iid}/*hyp*${num}*unthr*.nii*` + + printf "%0.3f %s\n" ${vvv} "${orig_dset}" >> ${ofile} + printf "%0.3f %s\n" ${vvv} "${ff}" >> ${rfile} + + end + + sort -nr < ${ofile} > ${ofile2} + sort -nr < ${rfile} > ${rfile2} + +end + + + + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_A.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_A.tcsh new file mode 100644 index 00000000..7e640836 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_A.tcsh @@ -0,0 +1,133 @@ +#!/bin/tcsh + +# this is a semi-slow process, so divide up into three batches + +set here = $PWD # for path; trivial, could be changed +set ref = ~/REF_TEMPLATES/MNI152_T1_2009c+tlrc.HEAD + +# loop over all hypotheses +foreach num ( `seq 1 1 3` ) + set hyp = "hypo${num}" + set ilist = `cat list_match_${num}_sort.txt | awk '{print $2}'` + # better than `\ls */*hyp*${num}_unthresh*nii*` + + set lcol = ( 255 255 255 ) # RGB line color bt image panels + set odir = ${here}/QC_${hyp} # output dir for images + + \mkdir ${odir} + + # ========================================================================= + + set allbase = () + set allid = () + set allfile = () + + if ( 1 ) then + foreach nn ( `seq 1 1 ${#ilist}` ) + set nnn = `printf "%03d" $nn` + set ff = "${ilist[$nn]}" + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + set allbase = ( ${allbase} ${ibase} ) + set allid = ( ${allid} ${iid} ) + set allfile = ( ${allfile} ${ff} ) + + echo "++ iid = '${iid}'; ibase = '${ibase}'; idir = '${idir}'" + + if ( 1 ) then + ### Make a montage of the 
zeroth brick of each image. + # Some fun-ness here: part of each file's name is added to the + # label string shown in each panel. + # Note: these olay datasets are unclustered and unmasked. + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha Linear \ + -olay_boxed Yes \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_alpha_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha No \ + -olay_boxed No \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_psi_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + endif + end + endif + + # ========================================================================= + + # get a good number of rows/cols for this input + + set nallbase = ${#allbase} + adjunct_calc_mont_dims.py ${nallbase} __tmp_${hyp} + set dims = `tail -n 1 __tmp_${hyp}` + + # ========================================================================= + + # output subj list + + set file_subj = "${odir}/list_of_all_subj.txt" + printf "" > ${file_subj} + foreach ii ( `seq 1 1 ${nallbase}` ) + printf "%4s %30s\n" ${allid[$ii]} ${allfile[$ii]} >> ${file_subj} + end + + # ======================================================================== + + foreach ss ( "sag" "cor" "axi" ) + # Combine alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_alpha_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_alpha*${ss}* + + # Combine non-alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_psi_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_psi*${ss}* + + end + +end + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_B.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_B.tcsh new file mode 100644 index 00000000..5446beea --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_B.tcsh @@ -0,0 +1,133 @@ +#!/bin/tcsh + +# this is a semi-slow process, so divide up into three batches + +set here = $PWD # for path; trivial, could be changed +set ref = ~/REF_TEMPLATES/MNI152_T1_2009c+tlrc.HEAD + +# loop over all hypotheses +foreach num ( `seq 4 1 6` ) + set hyp = "hypo${num}" + set ilist = `cat list_match_${num}_sort.txt | awk '{print $2}'` + # better than `\ls */*hyp*${num}_unthresh*nii*` + + set lcol = ( 255 255 255 ) # RGB line color bt image panels + set odir = ${here}/QC_${hyp} # output dir for images + + \mkdir ${odir} + + # ========================================================================= + + set allbase = () + set allid = () + set allfile = () + + if ( 1 ) then + foreach nn ( `seq 1 1 ${#ilist}` ) + set nnn = `printf "%03d" $nn` + set ff = "${ilist[$nn]}" + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + set allbase = ( ${allbase} ${ibase} ) + set allid = ( ${allid} ${iid} ) + 
set allfile = ( ${allfile} ${ff} ) + + echo "++ iid = '${iid}'; ibase = '${ibase}'; idir = '${idir}'" + + if ( 1 ) then + ### Make a montage of the zeroth brick of each image. + # Some fun-ness here: part of each file's name is added to the + # label string shown in each panel. + # Note: these olay datasets are unclustered and unmasked. + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha Linear \ + -olay_boxed Yes \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_alpha_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha No \ + -olay_boxed No \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_psi_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + endif + end + endif + + # ========================================================================= + + # get a good number of rows/cols for this input + + set nallbase = ${#allbase} + adjunct_calc_mont_dims.py ${nallbase} __tmp_${hyp} + set dims = `tail -n 1 __tmp_${hyp}` + + # ========================================================================= + + # output subj list + + set file_subj = "${odir}/list_of_all_subj.txt" + printf "" > ${file_subj} + foreach ii ( `seq 1 1 ${nallbase}` ) + printf "%4s %30s\n" ${allid[$ii]} ${allfile[$ii]} >> ${file_subj} + end + + # ======================================================================== + + foreach ss ( "sag" "cor" "axi" ) + # Combine alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_alpha_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_alpha*${ss}* + + # Combine non-alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_psi_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_psi*${ss}* + + end + +end + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_C.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_C.tcsh new file mode 100644 index 00000000..72b28b4e --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_04_make_imgs_C.tcsh @@ -0,0 +1,133 @@ +#!/bin/tcsh + +# this is a semi-slow process, so divide up into three batches + +set here = $PWD # for path; trivial, could be changed +set ref = ~/REF_TEMPLATES/MNI152_T1_2009c+tlrc.HEAD + +# loop over all hypotheses +foreach num ( `seq 7 1 9` ) + set hyp = "hypo${num}" + set ilist = `cat list_match_${num}_sort.txt | awk '{print $2}'` + # better than `\ls */*hyp*${num}_unthresh*nii*` + + set lcol = ( 255 255 255 ) # RGB line color bt image panels + set odir = ${here}/QC_${hyp} # output dir for images + + \mkdir ${odir} + + # ========================================================================= + + set allbase = () + set allid = () + set allfile = () + + if ( 1 ) then + foreach nn ( `seq 1 1 ${#ilist}` ) + set nnn = `printf "%03d" $nn` + set ff = "${ilist[$nn]}" + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set 
idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + set allbase = ( ${allbase} ${ibase} ) + set allid = ( ${allid} ${iid} ) + set allfile = ( ${allfile} ${ff} ) + + echo "++ iid = '${iid}'; ibase = '${ibase}'; idir = '${idir}'" + + if ( 1 ) then + ### Make a montage of the zeroth brick of each image. + # Some fun-ness here: part of each file's name is added to the + # label string shown in each panel. + # Note: these olay datasets are unclustered and unmasked. + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha Linear \ + -olay_boxed Yes \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_alpha_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + + @chauffeur_afni \ + -ulay ${ref} \ + -ulay_range "2%" "110%" \ + -olay ${ff} \ + -set_subbricks -1 0 0 \ + -func_range 5 \ + -thr_olay 3 \ + -cbar Reds_and_Blues_Inv \ + -olay_alpha No \ + -olay_boxed No \ + -opacity 7 \ + -prefix ${odir}/img_${nnn}_psi_${iid} \ + -montx 1 -monty 1 \ + -set_dicom_xyz 5 18 18 \ + -set_xhairs OFF \ + -label_string "::${iid}" \ + -label_mode 1 -label_size 3 \ + -do_clean + endif + end + endif + + # ========================================================================= + + # get a good number of rows/cols for this input + + set nallbase = ${#allbase} + adjunct_calc_mont_dims.py ${nallbase} __tmp_${hyp} + set dims = `tail -n 1 __tmp_${hyp}` + + # ========================================================================= + + # output subj list + + set file_subj = "${odir}/list_of_all_subj.txt" + printf "" > ${file_subj} + foreach ii ( `seq 1 1 ${nallbase}` ) + printf "%4s %30s\n" ${allid[$ii]} ${allfile[$ii]} >> ${file_subj} + end + + # ======================================================================== + + foreach ss ( "sag" "cor" "axi" ) + # Combine alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_alpha_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_alpha*${ss}* + + # Combine non-alpha-thresholded images + 2dcat \ + -echo_edu \ + -gap 5 \ + -gap_col ${lcol} \ + -ny ${dims[4]} \ + -nx ${dims[3]} \ + -zero_wrap \ + -prefix ${odir}/ALL_psi_${hyp}_sview_${ss}.jpg \ + ${odir}/img_*_psi*${ss}* + + end + +end + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_10_within_brain_masks.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_10_within_brain_masks.tcsh new file mode 100644 index 00000000..2930421c --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_10_within_brain_masks.tcsh @@ -0,0 +1,98 @@ +#!/bin/tcsh + +# make masks of data within MNI-reference mask for cases of: +# + where data is nonzero +# + where data is >thr +# + where data is <-thr +# + where data is > |thr| + +set dset_grid = ~/REF_TEMPLATES/MNI152_mask_222.nii.gz + +set thrval = 3 + +set all_num = `seq 1 1 9` + +foreach num ( ${all_num} ) + set all_dset = `ls -1 NARP*/*hyp*${num}*unthr*.nii*` + + set odir = res222_narps_hyp${num} + \mkdir -p ${odir} + + foreach nn ( `seq 1 1 ${#all_dset}` ) + set nnn = `printf "%03d" $nn` + set ff = "${all_dset[$nn]}" + + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + # the 
+ # the specific input dset, which was created earlier with this + # naming recipe + set dset_res = "${odir}/dset_${iid}_${idir}__${ibase}.nii.gz" + + set dset_m0 = "${odir}/mask_00_${iid}_wbbool.nii.gz" + set dset_m1 = "${odir}/mask_01_${iid}_posthr.nii.gz" + set dset_m2 = "${odir}/mask_02_${iid}_negthr.nii.gz" + set dset_m3 = "${odir}/mask_03_${iid}_allthr.nii.gz" + + echo "++ dset name: '${ff}'" + echo "++ dset_m0 : '${dset_m0}'" + 3dcalc -echo_edu \ + -overwrite \ + -a "${dset_res}" \ + -b "${dset_grid}" \ + -expr "bool(a)*step(b)" \ + -prefix "${dset_m0}" + + if ( $status ) then + echo "** ERROR: 3dcalc failed to make '${dset_m0}'" + exit 10 + endif + + echo "++ dset_m1 : '${dset_m1}'" + 3dcalc \ + -overwrite \ + -a "${dset_res}" \ + -b "${dset_grid}" \ + -expr "ispositive(a-${thrval})*step(b)" \ + -prefix "${dset_m1}" + + if ( $status ) then + echo "** ERROR: 3dcalc failed to make '${dset_m1}'" + exit 11 + endif + + echo "++ dset_m2 : '${dset_m2}'" + 3dcalc \ + -overwrite \ + -a "${dset_res}" \ + -b "${dset_grid}" \ + -expr "isnegative(a+${thrval})*step(b)" \ + -prefix "${dset_m2}" + + if ( $status ) then + echo "** ERROR: 3dcalc failed to make '${dset_m2}'" + exit 12 + endif + + echo "++ dset_m3 : '${dset_m3}'" + 3dcalc \ + -overwrite \ + -a "${dset_res}" \ + -b "${dset_grid}" \ + -expr "step(ispositive(a-${thrval})+isnegative(a+${thrval}))*step(b)" \ + -prefix "${dset_m3}" + + if ( $status ) then + echo "** ERROR: 3dcalc failed to make '${dset_m3}'" + exit 13 + endif + + end +end + + + + + diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_11_dice_pearson_coeffs.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_11_dice_pearson_coeffs.tcsh new file mode 100644 index 00000000..27fa407e --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_teams/do_11_dice_pearson_coeffs.tcsh @@ -0,0 +1,159 @@ +#!/bin/tcsh + +# use the within-brain masks (made by do_10_within_brain_masks.tcsh) for: +# + where data is nonzero +# + where data is >thr +# + where data is <-thr +# + where |data| > thr + +# make corr mats and images + +set dset_grid = ~/REF_TEMPLATES/MNI152_mask_222.nii.gz +set dset_grid_zoom = narps_mni_amyg_vmpfc_vstriatum_FS_resample_to_res_INFL1_res222_mskd.nii.gz # inflated version of Hyp #1&3 ROIs, resampled to 2x2x2 mm and masked within the WB mask + +set thrval = 3 + +set all_num = `seq 1 1 9` + +foreach num ( ${all_num} ) + + # get the list of dsets, ordered, from *this* file + set all_dset = `cat list_match_${num}_sort.txt | awk '{print $2}'` + set odir = res222_narps_hyp${num} + + echo "++ Hyp number : ${num}" + echo "++ N all_dset : ${#all_dset}" + echo "++ all_dset : ${all_dset}" + + # parse names, one by one; accumulate files in order, too + set all_iid = () + set all_dset_res = () + set all_dset_m0 = () + set all_dset_m1 = () + set all_dset_m2 = () + set all_dset_m3 = () + foreach nn ( `seq 1 1 ${#all_dset}` ) + set nnn = `printf "%03d" $nn` + set ff = "${all_dset[$nn]}" + + # base name of vol, and make a list of all prefixes for later + set ibase = `3dinfo -prefix_noext "${ff}"` + set idir = `dirname "${ff}"` + set iid = `printf "${idir}" | tail -c 4` + + # each is a useful input dset here + set dset_res = "${odir}/dset_${iid}_${idir}__${ibase}.nii.gz" + set dset_m0 = "${odir}/mask_00_${iid}_wbbool.nii.gz" + set dset_m1 = "${odir}/mask_01_${iid}_posthr.nii.gz" + set dset_m2 = "${odir}/mask_02_${iid}_negthr.nii.gz" + set dset_m3 = "${odir}/mask_03_${iid}_allthr.nii.gz" + + # the accumulating lists + set all_iid = ( ${all_iid} ${iid} ) + set all_dset_res = ( ${all_dset_res} ${dset_res} ) + set all_dset_m0 = ( ${all_dset_m0} ${dset_m0} ) + set all_dset_m1 = ( 
${all_dset_m1} ${dset_m1} ) + set all_dset_m2 = ( ${all_dset_m2} ${dset_m2} ) + set all_dset_m3 = ( ${all_dset_m3} ${dset_m3} ) + end + + # ====================================================================== + # create *.netcc file: for Dice coef, both WB and zoomed region + + set onetcc = ${odir}/matrix_dice_hyp${num}.netcc + + printf "" > ${onetcc} + + # header + printf "# %d # Number of network ROIs\n" "${#all_dset}" >> ${onetcc} + printf "# %d # Number of netcc matrices\n" "6" >> ${onetcc} + printf "# WITH_ROI_LABELS\n" >> ${onetcc} + + # 'ROI' (=dset) labels, numbers + printf " ${all_iid}\n" >> ${onetcc} + echo " `seq 1 1 ${#all_dset}`" >> ${onetcc} + + printf "# Dice_pos\n" + printf "# Dice_pos\n" >> ${onetcc} + 3ddot -full -dodice ${all_dset_m1} >> ${onetcc} + + printf "# Dice_pos, VMPFC and VST\n" + printf "# Dice_pos, VMPFC and VST\n" >> ${onetcc} + 3ddot -full -dodice -mask ${dset_grid_zoom} \ + ${all_dset_m1} >> ${onetcc} + + printf "# Dice_neg\n" + printf "# Dice_neg\n" >> ${onetcc} + 3ddot -full -dodice ${all_dset_m2} >> ${onetcc} + + printf "# Dice_neg, VMPFC and VST\n" + printf "# Dice_neg, VMPFC and VST\n" >> ${onetcc} + 3ddot -full -dodice -mask ${dset_grid_zoom} \ + ${all_dset_m2} >> ${onetcc} + + printf "# Dice_all\n" + printf "# Dice_all\n" >> ${onetcc} + 3ddot -full -dodice ${all_dset_m3} >> ${onetcc} + + printf "# Dice_all, VMPFC and VST\n" + printf "# Dice_all, VMPFC and VST\n" >> ${onetcc} + 3ddot -full -dodice -mask ${dset_grid_zoom} \ + ${all_dset_m3} >> ${onetcc} + + fat_mat2d_plot.py \ + -input ${onetcc} \ + -ftype svg \ + -cbar Reds \ + -vmin 0 \ + -vmax 1 + fat_mat2d_plot.py \ + -input ${onetcc} \ + -ftype tif \ + -cbar Reds \ + -vmin 0 \ + -vmax 1 + + # ====================================================================== + # create *.netcc file: for Continuous, both WB and zoomed region + + set onetcc = ${odir}/matrix_cont_hyp${num}.netcc + printf "" > ${onetcc} + + # header + printf "# %d # Number of network ROIs\n" "${#all_dset}" >> ${onetcc} + printf "# %d # Number of netcc matrices\n" "2" >> ${onetcc} + printf "# WITH_ROI_LABELS\n" >> ${onetcc} + + # 'ROI' (=dset) labels, numbers + printf " ${all_iid}\n" >> ${onetcc} + echo " `seq 1 1 ${#all_dset}`" >> ${onetcc} + + printf "# Corr_coef\n" + printf "# Corr_coef\n" >> ${onetcc} + 3ddot -full -docor -mask ${dset_grid} \ + ${all_dset_res} >> ${onetcc} + + printf "# Corr_coef, VMPFC and VST\n" + printf "# Corr_coef, VMPFC and VST\n" >> ${onetcc} + 3ddot -full -docor -mask ${dset_grid_zoom} \ + ${all_dset_res} >> ${onetcc} + + fat_mat2d_plot.py \ + -input ${onetcc} \ + -ftype svg \ + -cbar seismic \ + -vmin -1 \ + -vmax 1 + fat_mat2d_plot.py \ + -input ${onetcc} \ + -ftype tif \ + -cbar seismic \ + -vmin -1 \ + -vmax 1 +end + + + +echo "++ DONE" + +exit 0 diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/README.txt b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/README.txt new file mode 100644 index 00000000..ff980bad --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/README.txt @@ -0,0 +1,22 @@ +Some supplementary scripts for generating images, using voxelwise +processing results (and supplementary ROI maps and a template). + +Mainly provides some examples of running @chauffeur_afni. + +--------------------------------------------------------------------------- + ++ do_22_view_wb_TTEST.tcsh + + Used to make the panels in Fig. 2. + ++ do_13_view_zoom.tcsh + + Used to set up suma+afni to make panels A and B in Fig. 3 (requires a + bit of button-pushing to set line colors, as described in comments). 
+ + ++ do_06_clust_olap.tcsh + + Used to make a cluster table report of ROI overlaps, such as in + Table 2 of the "Highlight, Don't Hide" paper. This script has been + updated with a note about using FreeSurfer parcellations for ROI + overlap reference, too. diff --git a/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/do_06_clust_olap.tcsh b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/do_06_clust_olap.tcsh new file mode 100644 index 00000000..70899416 --- /dev/null +++ b/narps_open/pipelines/team_I9D6/scripts_suppl_proc_vox/do_06_clust_olap.tcsh @@ -0,0 +1,103 @@ +#!/bin/tcsh + +# NB: this makes a table of whereami results when a Clust_mask dataset +# is input. It rearranges information with some awk-ward fun. +# +# This script is run from a directory with the Clust_mask+tlrc.HEAD +# dataset output by 3dClusterize or the AFNI GUI Clusterize plugin. +# To run, type: +# +# tcsh do_06_clust_olap.tcsh +# +# Update: Related to a later Message Board question about using a +# FreeSurfer output ROI as an overlap reference, you could take a dset +# (which might readily be a 'follower ROI dataset' from +# afni_proc.py processing), atlasize it (with @MakeLabelTable) and +# then use it as a ref_atl below. In detail: +# +# 3dcopy follow_ROI_aaseg+tlrc.HEAD follow_ROI_aaseg_ATLIZE.nii.gz +# @MakeLabelTable -atlasize_labeled_dset follow_ROI_aaseg_ATLIZE.nii.gz +# +# ... and then assign 'follow_ROI_aaseg_ATLIZE' to the variable +# ref_atl below (noting that the filename extension of the atlas +# dataset is not included in the ref_atl variable). +# +# [PA Taylor (SSCC, NIMH, NIH, USA): June 26, 2023] +# --------------------------------------------------------------------------- + +setenv AFNI_WHEREAMI_NO_WARN YES + +set ref_atl = MNI_Glasser_HCP_v1.0 + +set all_type = ( olap ) + +set min_olap = 10 # minimum percent overlap to be included in table + +foreach ii ( `seq 1 1 ${#all_type}` ) + set type = "${all_type[$ii]}" + + echo "++ Make table for: ${type}" + + # start the output file and formatting + set ofile = info_table_clust_wami_${type}.txt + printf "" > ${ofile} + printf "%5s %5s %7s %s \n" \ + "Clust" "Nvox" "Overlap" "ROI location" \ + |& tee -a ${ofile} + + set idset = Clust_mask+tlrc.HEAD # dumped by GUI-Clusterize + set nclust = `3dinfo -max "${idset}"` + + set tfile = __tmp_file_olap.txt + + # go through the dset, int by int, to parse a bit + foreach nn ( `seq 1 1 ${nclust}` ) + # create zero-based index (not actually used below) + @ mm = $nn - 1 + + set nvox = `3dROIstats -nomeanout -quiet -nzvoxels \ + -mask "${idset}<${nn}>" \ + "${idset}"` + + whereami \ + -omask "${idset}<${nn}>" \ + -atlas "${ref_atl}" \ + | grep --color=never '% overlap with' \ + > ${tfile} + + set nrow = `cat ${tfile} | wc -l` + + set NEED_ONE = 1 + foreach rr ( `seq 1 1 ${nrow}` ) + set line = `cat ${tfile} | sed -n ${rr}p` + set perc = `echo "${line}" | awk '{print $1}'` + + set roi = `echo "${line}" \ + | awk -F'% overlap with' '{print $2}' \ + | awk -F, '{print $1}'` + + + if ( ${NEED_ONE} ) then + printf "%5d %5d %7.1f%% %s \n" \ + "${nn}" "${nvox}" "${perc}" "${roi}" \ + |& tee -a ${ofile} + set NEED_ONE = 0 + else if ( `echo "${perc} > ${min_olap}" | bc` ) then + printf "%5s %5s %7.1f%% %s \n" \ + "" "" "${perc}" "${roi}" \ + |& tee -a ${ofile} + endif + end + end +end + +\rm ${tfile} + +cat <