diff --git a/PUMI/pipelines/func/deconfound.py b/PUMI/pipelines/func/deconfound.py
index 43f9d7d..bb5fd49 100644
--- a/PUMI/pipelines/func/deconfound.py
+++ b/PUMI/pipelines/func/deconfound.py
@@ -30,7 +30,19 @@ def fieldmap_correction_qc(wf, volume='middle', **kwargs):
     """
-    def create_montage(vol_1, vol_2, vol_corrected):
+    def get_cut_coords(func, n_slices=10):
+        import nibabel as nib
+        import numpy as np
+
+        func_img = nib.load(func)
+        y_dim = func_img.shape[1]  # the y-dimension (coronal direction) is the second entry of the image shape
+
+        slices = np.linspace(-y_dim / 2, y_dim / 2, n_slices)
+        # slices may contain floats, but that is fine: nilearn rounds cut coordinates to the
+        # nearest integer value.
+        return slices
+
+    def create_montage(vol_1, vol_2, vol_corrected, n_slices=10):
         from matplotlib import pyplot as plt
         from pathlib import Path
         from nilearn import plotting
@@ -38,9 +50,12 @@ def create_montage(vol_1, vol_2, vol_corrected):
         fig, axes = plt.subplots(3, 1, facecolor='black', figsize=(10, 15))
-        plotting.plot_anat(vol_1, display_mode='ortho', title='Image #1', black_bg=True, axes=axes[0])
-        plotting.plot_anat(vol_2, display_mode='ortho', title='Image #2', black_bg=True, axes=axes[1])
-        plotting.plot_anat(vol_corrected, display_mode='ortho', title='Corrected', black_bg=True, axes=axes[2])
+        plotting.plot_anat(vol_1, display_mode='y', cut_coords=get_cut_coords(vol_1, n_slices=n_slices),
+                           title='Image #1', black_bg=True, axes=axes[0])
+        plotting.plot_anat(vol_2, display_mode='y', cut_coords=get_cut_coords(vol_2, n_slices=n_slices),
+                           title='Image #2', black_bg=True, axes=axes[1])
+        plotting.plot_anat(vol_corrected, display_mode='y', cut_coords=get_cut_coords(vol_corrected, n_slices=n_slices),
+                           title='Corrected', black_bg=True, axes=axes[2])
         path = str(Path(os.getcwd() + '/fieldmap_correction_comparison.png'))
         plt.savefig(path)
@@ -67,19 +82,21 @@ def create_montage(vol_1, vol_2, vol_corrected):
     wf.connect(vol_corrected, 'out_file', montage, 'vol_corrected')
     wf.connect(montage, 'out_file', 'outputspec', 'out_file')
-    wf.connect(montage, 'out_file', 'sinker', 'out_file')
+    wf.connect(montage, 'out_file', 'sinker', 'qc_fieldmap_correction')

 @FuncPipeline(inputspec_fields=['func_1', 'func_2'],
               outputspec_fields=['out_file'])
-def fieldmap_correction(wf, encoding_direction=['y-', 'y'], readout_times=[0.08264, 0.08264], tr=0.72, **kwargs):
+def fieldmap_correction(wf, encoding_direction=['x-', 'x'], trt=[0.0522, 0.0522], tr=0.72, **kwargs):
     """
     Fieldmap correction pipeline.

     Parameters:
         encoding_direction (list): List of encoding directions (default is left-right and right-left phase encoding).
-        readout_times (list): List of readout times (default adapted to rsfMRI data of the HCP WU 1200 dataset).
+        trt (list): List of total readout times (default adapted to rsfMRI data of the HCP WU 1200 dataset).
+              Default is:
+              0.001 * EchoSpacingMS * EpiFactor = 0.001 * 0.58 * 90 = 0.0522 (same for the LR and the RL image)
         tr (float): Repetition time (default adapted to rsfMRI data of the HCP WU 1200 dataset).

     Inputs:
@@ -92,8 +109,11 @@ def fieldmap_correction(wf, encoding_direction=['y-', 'y'], readout_times=[0.082
     Sinking:
     - 4d distortion corrected image.
- For more information regarding the parameters: + + For more information: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/ExampleTopupFollowedByApplytopup + https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/Faq#How_do_I_know_what_phase-encode_vectors_to_put_into_my_--datain_text_file.3F + https://www.humanconnectome.org/storage/app/media/documentation/s1200/HCP_S1200_Release_Appendix_I.pdf """ @@ -127,7 +147,7 @@ def fieldmap_correction(wf, encoding_direction=['y-', 'y'], readout_times=[0.082 # Estimate susceptibility induced distortions topup = Node(fsl.TOPUP(), name='topup') topup.inputs.encoding_direction = encoding_direction - topup.inputs.readout_times = readout_times + topup.inputs.readout_times = trt wf.connect(merger, 'merged_file', topup, 'in_file') # The two original 4D files are also needed inside a list diff --git a/pipelines/hcp_rcpl.py b/pipelines/hcp.py similarity index 62% rename from pipelines/hcp_rcpl.py rename to pipelines/hcp.py index c66769c..c7e97b6 100644 --- a/pipelines/hcp_rcpl.py +++ b/pipelines/hcp.py @@ -1,24 +1,20 @@ #!/usr/bin/env python3 -import argparse -import glob -from PUMI import globals -from nipype import IdentityInterface, DataGrabber from nipype.interfaces.fsl import Reorient2Std from nipype.interfaces import afni from PUMI.engine import BidsPipeline, NestedNode as Node, FuncPipeline, GroupPipeline, BidsApp from PUMI.pipelines.anat.anat_proc import anat_proc -from PUMI.pipelines.func.compcor import anat_noise_roi +from PUMI.pipelines.func.compcor import anat_noise_roi, compcor from PUMI.pipelines.anat.func_to_anat import func2anat -from PUMI.pipelines.func.deconfound import fieldmap_correction from nipype.interfaces import utility + +from PUMI.pipelines.func.deconfound import fieldmap_correction from PUMI.pipelines.func.func_proc import func_proc_despike_afni -from PUMI.pipelines.func.timeseries_extractor import extract_timeseries_nativespace +from PUMI.pipelines.func.timeseries_extractor import pick_atlas, extract_timeseries_nativespace from PUMI.utils import mist_modules, mist_labels, get_reference from PUMI.pipelines.func.func2standard import func2standard -from PUMI.engine import NestedWorkflow as Workflow - -from pathlib import Path +from PUMI.pipelines.multimodal.image_manipulation import pick_volume +from PUMI.engine import save_software_versions import traits import os @@ -369,169 +365,121 @@ def merge_predictions(rpn_out_file, rcpl_out_file): wf.connect(merge_predictions_wf, 'out_file', 'sinker', 'pain_predictions') -parser = argparse.ArgumentParser() +@BidsPipeline(output_query={ + 'T1w': dict( + datatype='anat', + suffix='T1w', + extension=['nii', 'nii.gz'] + ), + 'bold_lr': dict( + datatype='func', + suffix='bold', + acquisition='LR', + extension=['nii', 'nii.gz'] + ), + 'bold_rl': dict( + datatype='func', + suffix='bold', + acquisition='RL', + extension=['nii', 'nii.gz'] + ) +}) +def hcp(wf, bbr=True, **kwargs): + """ + The HCP pipeline is the RCPL pipeline but with different inputs (two bold images with different phase encodings + instead of one bold image) and with additional fieldmap correction. -parser.add_argument( - '--bids_dir', - required=True, - help='Root directory of the input dataset.' -) + CAUTION: This pipeline assumes that you converted the HCP dataset into the BIDS format! + """ -parser.add_argument( - '--output_dir', - required=True, - help='Directory where the results will be stored.' 
+ print('* bbr:', bbr) + + reorient_struct_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_struct_wf") + wf.connect('inputspec', 'T1w', reorient_struct_wf, 'in_file') + + reorient_func_lr_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_func_lr_wf") + wf.connect('inputspec', 'bold_lr', reorient_func_lr_wf, 'in_file') + + reorient_func_rl_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_func_rl_wf") + wf.connect('inputspec', 'bold_rl', reorient_func_rl_wf, 'in_file') + + fieldmap_corr = fieldmap_correction('fieldmap_corr') + wf.connect(reorient_func_lr_wf, 'out_file', fieldmap_corr, 'func_1') + wf.connect(reorient_func_rl_wf, 'out_file', fieldmap_corr, 'func_2') + + anatomical_preprocessing_wf = anat_proc(name='anatomical_preprocessing_wf', bet_tool='deepbet') + wf.connect(reorient_struct_wf, 'out_file', anatomical_preprocessing_wf, 'in_file') + + func2anat_wf = func2anat(name='func2anat_wf', bbr=bbr) + wf.connect(fieldmap_corr, 'out_file', func2anat_wf, 'func') + wf.connect(anatomical_preprocessing_wf, 'brain', func2anat_wf, 'head') + wf.connect(anatomical_preprocessing_wf, 'probmap_wm', func2anat_wf, 'anat_wm_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_csf', func2anat_wf, 'anat_csf_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_gm', func2anat_wf, 'anat_gm_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_ventricle', func2anat_wf, 'anat_ventricle_segmentation') + + compcor_roi_wf = anat_noise_roi('compcor_roi_wf') + wf.connect(func2anat_wf, 'wm_mask_in_funcspace', compcor_roi_wf, 'wm_mask') + wf.connect(func2anat_wf, 'ventricle_mask_in_funcspace', compcor_roi_wf, 'ventricle_mask') + + func_proc_wf = func_proc_despike_afni('func_proc_wf', bet_tool='deepbet', deepbet_n_dilate=2) + wf.connect(fieldmap_corr, 'out_file', func_proc_wf, 'func') + wf.connect(compcor_roi_wf, 'out_file', func_proc_wf, 'cc_noise_roi') + + pick_atlas_wf = mist_atlas('pick_atlas_wf') + mist_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data_in/atlas/MIST")) + pick_atlas_wf.get_node('inputspec').inputs.labelmap = os.path.join(mist_dir, 'Parcellations/MIST_122.nii.gz') + pick_atlas_wf.get_node('inputspec').inputs.modules = mist_modules(mist_directory=mist_dir, resolution="122") + pick_atlas_wf.get_node('inputspec').inputs.labels = mist_labels(mist_directory=mist_dir, resolution="122") + + extract_timeseries = extract_timeseries_nativespace('extract_timeseries') + wf.connect(pick_atlas_wf, 'relabeled_atlas', extract_timeseries, 'atlas') + wf.connect(pick_atlas_wf, 'reordered_labels', extract_timeseries, 'labels') + wf.connect(pick_atlas_wf, 'reordered_modules', extract_timeseries, 'modules') + wf.connect(anatomical_preprocessing_wf, 'brain', extract_timeseries, 'anat') + wf.connect(func2anat_wf, 'anat_to_func_linear_xfm', extract_timeseries, 'inv_linear_reg_mtrx') + wf.connect(anatomical_preprocessing_wf, 'mni2anat_warpfield', extract_timeseries, 'inv_nonlinear_reg_mtrx') + wf.connect(func2anat_wf, 'gm_mask_in_funcspace', extract_timeseries, 'gm_mask') + wf.connect(func_proc_wf, 'func_preprocessed', extract_timeseries, 'func') + wf.connect(func_proc_wf, 'FD', extract_timeseries, 'confounds') + + func2std = func2standard('func2std') + wf.connect(anatomical_preprocessing_wf, 'brain', func2std, 'anat') + wf.connect(func2anat_wf, 'func_to_anat_linear_xfm', func2std, 'linear_reg_mtrx') + wf.connect(anatomical_preprocessing_wf, 'anat2mni_warpfield', func2std, 'nonlinear_reg_mtrx') + 
wf.connect(anatomical_preprocessing_wf, 'std_template', func2std, 'reference_brain') + wf.connect(func_proc_wf, 'func_preprocessed', func2std, 'func') + wf.connect(func_proc_wf, 'mc_ref_vol', func2std, 'bbr2ants_source_file') + + calculate_connectivity_wf = calculate_connectivity('calculate_connectivity_wf') + wf.connect(extract_timeseries, 'timeseries', calculate_connectivity_wf, 'ts_files') + wf.connect(func_proc_wf, 'FD', calculate_connectivity_wf, 'fd_files') + + predict_pain_sensitivity_rpn_wf = predict_pain_sensitivity_rpn('predict_pain_sensitivity_rpn_wf') + wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rpn_wf, 'X') + wf.connect(fieldmap_corr, 'out_file', predict_pain_sensitivity_rpn_wf, 'in_file') + + predict_pain_sensitivity_rcpl_wf = predict_pain_sensitivity_rcpl('predict_pain_sensitivity_rcpl_wf') + wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rcpl_wf, 'X') + wf.connect(fieldmap_corr, 'out_file', predict_pain_sensitivity_rcpl_wf, 'in_file') + + collect_pain_predictions_wf = collect_pain_predictions('collect_pain_predictions_wf') + wf.connect(predict_pain_sensitivity_rpn_wf, 'out_file', collect_pain_predictions_wf, 'rpn_out_file') + wf.connect(predict_pain_sensitivity_rcpl_wf, 'out_file', collect_pain_predictions_wf, 'rcpl_out_file') + + wf.write_graph('HCP-pipeline.png') + save_software_versions(wf) + + +hcp_app = BidsApp( + pipeline=hcp, + name='hcp' ) - -parser.add_argument( +hcp_app.parser.add_argument( '--bbr', default='yes', - type=lambda x: (str(x).lower() == ['true', '1', 'yes']), - help='Use BBR registration: yes/no (default: yes)' + type=lambda x: (str(x).lower() in ['true', '1', 'yes']), + help="Use BBR registration: yes/no (default: yes)" ) -parser.add_argument('--n_procs', type=int, - help='Amount of threads to execute in parallel.' - + 'If not set, the amount of CPU cores is used.' - + 'Caution: Does only work with the MultiProc-plugin!') - -parser.add_argument('--memory_gb', type=int, - help='Memory limit in GB. If not set, use 90% of the available memory' - + 'Caution: Does only work with the MultiProc-plugin!') - - -cli_args = parser.parse_args() - -input_dir = cli_args.bids_dir -output_dir = cli_args.output_dir -bbr = cli_args.bbr - -plugin_args = {} -if cli_args.n_procs is not None: - plugin_args['n_procs'] = cli_args.n_procs - -if cli_args.memory_gb is not None: - plugin_args['memory_gb'] = cli_args.memory_gb - -subjects = [] -excluded = [] -for path in glob.glob(str(input_dir) + '/*'): - id = path.split('/')[-1] - - base = path + '/unprocessed/3T/' - - t1w_base = str(Path(base + '/T1w_MPR1/' + id + '_3T_T1w_MPR1.nii')) - has_t1w = os.path.isfile(t1w_base) or os.path.isfile(t1w_base + '.gz') - - lr_base = str(Path(base + '/rfMRI_REST1_LR/' + id + '_3T_rfMRI_REST1_LR.nii')) - has_lr = os.path.isfile(lr_base) or os.path.isfile(lr_base + '.gz') - - rl_base = str(Path(base + '/rfMRI_REST1_RL/' + id + '_3T_rfMRI_REST1_RL.nii')) - has_rl = os.path.isfile(rl_base) or os.path.isfile(rl_base + '.gz') - - if has_t1w and has_lr and has_rl: - subjects.append(id) - else: - excluded.append(id) - -print('-' * 100) -print(f'Included %d subjects.' % len(subjects)) -print(f'Excluded %d subjects.' % len(excluded)) -print('-' * 100) - - -wf = Workflow(name='HCP-RCPL') -wf.base_dir = '.' 
-globals.cfg_parser.set('SINKING', 'sink_dir', str(Path(os.path.abspath(output_dir + '/derivatives')))) -globals.cfg_parser.set('SINKING', 'qc_dir', str(Path(os.path.abspath(output_dir + '/derivatives/qc')))) - - -# Create a subroutine (subgraph) for every subject -inputspec = Node(interface=IdentityInterface(fields=['subject']), name='inputspec') -inputspec.iterables = [('subject', subjects)] - -T1w_grabber = Node(DataGrabber(infields=['subject'], outfields=['T1w']), name='T1w_grabber') -T1w_grabber.inputs.base_directory = os.path.abspath(input_dir) -T1w_grabber.inputs.template = '%s/unprocessed/3T/T1w_MPR1/*T1w_MPR1.nii*' -T1w_grabber.inputs.sort_filelist = True -wf.connect(inputspec, 'subject', T1w_grabber, 'subject') - -bold_lr_grabber = Node(DataGrabber(infields=['subject'], outfields=['bold_lr']), name='bold_lr_grabber') -bold_lr_grabber.inputs.base_directory = os.path.abspath(input_dir) -bold_lr_grabber.inputs.template = '%s/unprocessed/3T/rfMRI_REST1_LR/*_3T_rfMRI_REST1_LR.nii*' -bold_lr_grabber.inputs.sort_filelist = True -wf.connect(inputspec, 'subject', bold_lr_grabber, 'subject') - -bold_rl_grabber = Node(DataGrabber(infields=['subject'], outfields=['bold_rl']), name='bold_rl_grabber') -bold_rl_grabber.inputs.base_directory = os.path.abspath(input_dir) -bold_rl_grabber.inputs.template = '%s/unprocessed/3T/rfMRI_REST1_RL/*_3T_rfMRI_REST1_RL.nii*' -bold_rl_grabber.inputs.sort_filelist = True -wf.connect(inputspec, 'subject', bold_rl_grabber, 'subject') - -reorient_struct_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_struct_wf") -wf.connect(T1w_grabber, 'T1w', reorient_struct_wf, 'in_file') - -reorient_func_lr_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_func_lr_wf") -wf.connect(bold_lr_grabber, 'bold_lr', reorient_func_lr_wf, 'in_file') - -reorient_func_rl_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_func_rl_wf") -wf.connect(bold_rl_grabber, 'bold_rl', reorient_func_rl_wf, 'in_file') - -fieldmap_corr = fieldmap_correction('fieldmap_corr') -wf.connect(reorient_func_lr_wf, 'out_file', fieldmap_corr, 'func_1') -wf.connect(reorient_func_rl_wf, 'out_file', fieldmap_corr, 'func_2') - -anatomical_preprocessing_wf = anat_proc(name='anatomical_preprocessing_wf', bet_tool='deepbet') -wf.connect(reorient_struct_wf, 'out_file', anatomical_preprocessing_wf, 'in_file') - -func2anat_wf = func2anat(name='func2anat_wf', bbr=bbr) -wf.connect(fieldmap_corr, 'out_file', func2anat_wf, 'func') -wf.connect(anatomical_preprocessing_wf, 'brain', func2anat_wf, 'head') -wf.connect(anatomical_preprocessing_wf, 'probmap_wm', func2anat_wf, 'anat_wm_segmentation') -wf.connect(anatomical_preprocessing_wf, 'probmap_csf', func2anat_wf, 'anat_csf_segmentation') -wf.connect(anatomical_preprocessing_wf, 'probmap_gm', func2anat_wf, 'anat_gm_segmentation') -wf.connect(anatomical_preprocessing_wf, 'probmap_ventricle', func2anat_wf, 'anat_ventricle_segmentation') - -compcor_roi_wf = anat_noise_roi('compcor_roi_wf') -wf.connect(func2anat_wf, 'wm_mask_in_funcspace', compcor_roi_wf, 'wm_mask') -wf.connect(func2anat_wf, 'ventricle_mask_in_funcspace', compcor_roi_wf, 'ventricle_mask') - -func_proc_wf = func_proc_despike_afni('func_proc_wf', bet_tool='deepbet', deepbet_n_dilate=2) -wf.connect(fieldmap_corr, 'out_file', func_proc_wf, 'func') -wf.connect(compcor_roi_wf, 'out_file', func_proc_wf, 'cc_noise_roi') - -pick_atlas_wf = mist_atlas('pick_atlas_wf') -mist_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data_in/atlas/MIST")) 
-pick_atlas_wf.get_node('inputspec').inputs.labelmap = os.path.join(mist_dir, 'Parcellations/MIST_122.nii.gz') -pick_atlas_wf.get_node('inputspec').inputs.modules = mist_modules(mist_directory=mist_dir, resolution="122") -pick_atlas_wf.get_node('inputspec').inputs.labels = mist_labels(mist_directory=mist_dir, resolution="122") - -extract_timeseries = extract_timeseries_nativespace('extract_timeseries') -wf.connect(pick_atlas_wf, 'relabeled_atlas', extract_timeseries, 'atlas') -wf.connect(pick_atlas_wf, 'reordered_labels', extract_timeseries, 'labels') -wf.connect(pick_atlas_wf, 'reordered_modules', extract_timeseries, 'modules') -wf.connect(anatomical_preprocessing_wf, 'brain', extract_timeseries, 'anat') -wf.connect(func2anat_wf, 'anat_to_func_linear_xfm', extract_timeseries, 'inv_linear_reg_mtrx') -wf.connect(anatomical_preprocessing_wf, 'mni2anat_warpfield', extract_timeseries, 'inv_nonlinear_reg_mtrx') -wf.connect(func2anat_wf, 'gm_mask_in_funcspace', extract_timeseries, 'gm_mask') -wf.connect(func_proc_wf, 'func_preprocessed', extract_timeseries, 'func') -wf.connect(func_proc_wf, 'FD', extract_timeseries, 'confounds') - -calculate_connectivity_wf = calculate_connectivity('calculate_connectivity_wf') -wf.connect(extract_timeseries, 'timeseries', calculate_connectivity_wf, 'ts_files') -wf.connect(func_proc_wf, 'FD', calculate_connectivity_wf, 'fd_files') - -predict_pain_sensitivity_rpn_wf = predict_pain_sensitivity_rpn('predict_pain_sensitivity_rpn_wf') -wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rpn_wf, 'X') -wf.connect(fieldmap_corr, 'out_file', predict_pain_sensitivity_rpn_wf, 'in_file') - -predict_pain_sensitivity_rcpl_wf = predict_pain_sensitivity_rcpl('predict_pain_sensitivity_rcpl_wf') -wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rcpl_wf, 'X') -wf.connect(fieldmap_corr, 'out_file', predict_pain_sensitivity_rcpl_wf, 'in_file') - -collect_pain_predictions_wf = collect_pain_predictions('collect_pain_predictions_wf') -wf.connect(predict_pain_sensitivity_rpn_wf, 'out_file', collect_pain_predictions_wf, 'rpn_out_file') -wf.connect(predict_pain_sensitivity_rcpl_wf, 'out_file', collect_pain_predictions_wf, 'rcpl_out_file') - -wf.write_graph('Pipeline.png') -wf.run(plugin='MultiProc', plugin_args=plugin_args) +hcp_app.run() diff --git a/pipelines/hcp_lr_only.py b/pipelines/hcp_lr_only.py new file mode 100644 index 0000000..835dc5e --- /dev/null +++ b/pipelines/hcp_lr_only.py @@ -0,0 +1,470 @@ +#!/usr/bin/env python3 + +from nipype.interfaces.fsl import Reorient2Std +from nipype.interfaces import afni +from PUMI.engine import BidsPipeline, NestedNode as Node, FuncPipeline, GroupPipeline, BidsApp +from PUMI.pipelines.anat.anat_proc import anat_proc +from PUMI.pipelines.func.compcor import anat_noise_roi, compcor +from PUMI.pipelines.anat.func_to_anat import func2anat +from nipype.interfaces import utility + +from PUMI.pipelines.func.deconfound import fieldmap_correction +from PUMI.pipelines.func.func_proc import func_proc_despike_afni +from PUMI.pipelines.func.timeseries_extractor import pick_atlas, extract_timeseries_nativespace +from PUMI.utils import mist_modules, mist_labels, get_reference +from PUMI.pipelines.func.func2standard import func2standard +from PUMI.pipelines.multimodal.image_manipulation import pick_volume +from PUMI.engine import save_software_versions +import traits +import os + + +def relabel_mist_atlas(atlas_file, modules, labels): + """ + Relabel MIST atlas + * Beware : currently works only with labelmap!! 
+    Parameters:
+        atlas_file (str): Path to the atlas file
+        modules ([str]): List containing the modules in MIST
+        labels ([str]): List containing the labels in MIST
+    Returns:
+        relabel_file (str): Path to the relabeled atlas file
+        reordered_modules ([str]): list containing the reordered module names
+        reordered_labels ([str]): list containing the reordered label names
+        newlabels_file (str): Path to the .tsv-file with the new labels
+    """
+
+    import os
+    import numpy as np
+    import pandas as pd
+    import nibabel as nib
+
+    df = pd.DataFrame({'modules': modules, 'labels': labels})
+    df.index += 1  # indexing from 1
+
+    reordered = df.sort_values(by='modules')
+
+    # relabel labelmap
+    img = nib.load(atlas_file)
+    if len(img.shape) != 3:
+        raise Exception("relabeling does not work for probability maps!")
+
+    lut = reordered.reset_index().sort_values(by="index").index.values + 1
+    lut = np.array([0] + lut.tolist())
+    # lut[old_label] gives the new label of a region after module-based reordering (0 stays background)
+
+    data = img.get_fdata()
+    newdata = lut[np.array(data, dtype=np.int32)]  # apply lookup table to swap labels
+
+    img = nib.Nifti1Image(newdata.astype(np.float64), img.affine)
+    nib.save(img, 'relabeled_atlas.nii.gz')
+
+    out = reordered.reset_index()
+    out.index = out.index + 1
+    relabel_file = os.path.join(os.getcwd(), 'relabeled_atlas.nii.gz')
+    reordered_modules = reordered['modules'].values.tolist()
+    reordered_labels = reordered['labels'].values.tolist()
+
+    newlabels_file = os.path.join(os.getcwd(), 'newlabels.tsv')
+    out.to_csv(newlabels_file, sep='\t')
+    return relabel_file, reordered_modules, reordered_labels, newlabels_file
+
+@GroupPipeline(inputspec_fields=['labelmap', 'modules', 'labels'],
+               outputspec_fields=['relabeled_atlas', 'reordered_labels', 'reordered_modules'])
+def mist_atlas(wf, reorder=True, **kwargs):
+
+    resample_atlas = Node(
+        interface=afni.Resample(
+            outputtype='NIFTI_GZ',
+            master=get_reference(wf, 'brain'),
+        ),
+        name='resample_atlas'
+    )
+
+    if reorder:
+        # reorder if modules is given (like for MIST atlases)
+        relabel_atls = Node(
+            interface=utility.Function(
+                input_names=['atlas_file', 'modules', 'labels'],
+                output_names=['relabelled_atlas_file', 'reordered_modules', 'reordered_labels', 'newlabels_file'],
+                function=relabel_mist_atlas
+            ),
+            name='relabel_atls'
+        )
+        wf.connect('inputspec', 'labelmap', relabel_atls, 'atlas_file')
+        wf.connect('inputspec', 'modules', relabel_atls, 'modules')
+        wf.connect('inputspec', 'labels', relabel_atls, 'labels')
+
+        wf.connect(relabel_atls, 'relabelled_atlas_file', resample_atlas, 'in_file')
+    else:
+        wf.connect('inputspec', 'labelmap', resample_atlas, 'in_file')
+
+    # Sinking
+    wf.connect(resample_atlas, 'out_file', 'sinker', 'atlas')
+    if reorder:
+        wf.connect(relabel_atls, 'newlabels_file', 'sinker', 'reordered_labels')
+    else:
+        wf.connect('inputspec', 'labels', 'sinker', 'atlas_labels')
+
+    # Output
+    wf.connect(resample_atlas, 'out_file', 'outputspec', 'relabeled_atlas')
+    if reorder:
+        wf.connect(relabel_atls, 'reordered_labels', 'outputspec', 'reordered_labels')
+        wf.connect(relabel_atls, 'reordered_modules', 'outputspec', 'reordered_modules')
+    else:
+        wf.connect('inputspec', 'labels', 'outputspec', 'reordered_labels')
+        wf.connect('inputspec', 'modules', 'outputspec', 'reordered_modules')
+
+
+@FuncPipeline(inputspec_fields=['ts_files', 'fd_files', 'scrub_threshold'],
+              outputspec_fields=['features', 'out_file'])
+def calculate_connectivity(wf, **kwargs):
+
+    def calc_connectivity(ts_files, fd_files, scrub_threshold):
+        import os
+        import pandas as pd
+        import numpy as np
+        from PUMI.PAINTeR import load_timeseries, connectivity_matrix
+
+        if not isinstance(ts_files, (list, np.ndarray)):  # in this case we assume we have a string or path-like object
+            ts_files = [ts_files]
+        if not isinstance(fd_files, (list, np.ndarray)):  # in this case we assume we have a string or path-like object
+            fd_files = [fd_files]
+        FD = []
+        mean_FD = []
+        median_FD = []
+        max_FD = []
+        perc_scrubbed = []
+        for f in fd_files:
+            fd = pd.read_csv(f, sep="\t").values.flatten()
+            fd = np.insert(fd, 0, 0)
+            FD.append(fd.ravel())
+            mean_FD.append(fd.mean())
+            median_FD.append(np.median(fd))
+            max_FD.append(fd.max())
+            perc_scrubbed.append(100 - 100 * len(fd[fd <= scrub_threshold]) / len(fd))  # % of volumes removed by scrubbing
+
+        df = pd.DataFrame()
+        df['ts_file'] = ts_files
+        df['fd_file'] = fd_files
+        df['meanFD'] = mean_FD
+        df['medianFD'] = median_FD
+        df['maxFD'] = max_FD
+        df['perc_scrubbed'] = perc_scrubbed
+
+        ts, labels = load_timeseries(ts_files, df, scrubbing=True, scrub_threshold=scrub_threshold)
+        features, cm = connectivity_matrix(np.array(ts))
+
+        path = os.path.abspath('motion.csv')
+        df.to_csv(path)
+        return features, path
+
+    connectivity_wf = Node(
+        utility.Function(
+            input_names=['ts_files', 'fd_files', 'scrub_threshold'],
+            output_names=['features', 'out_file'],
+            function=calc_connectivity
+        ),
+        name="connectivity_wf"
+    )
+    wf.connect('inputspec', 'ts_files', connectivity_wf, 'ts_files')
+    wf.connect('inputspec', 'fd_files', connectivity_wf, 'fd_files')
+
+    if isinstance(wf.get_node('inputspec').inputs.scrub_threshold, traits.trait_base._Undefined):
+        connectivity_wf.inputs.scrub_threshold = 0.15
+    else:
+        wf.connect('inputspec', 'scrub_threshold', connectivity_wf, 'scrub_threshold')
+
+    wf.connect(connectivity_wf, 'features', 'outputspec', 'features')
+    wf.connect(connectivity_wf, 'out_file', 'outputspec', 'out_file')
+
+    wf.connect(connectivity_wf, 'out_file', 'sinker', 'connectivity')
+
+
+@FuncPipeline(inputspec_fields=['X', 'in_file'],
+              outputspec_fields=['score', 'out_file'])
+def predict_pain_sensitivity_rpn(wf, **kwargs):
+    """
+
+    Perform pain sensitivity prediction using the RPN signature
+    (Resting-state Pain susceptibility Network signature).
+    Further information regarding the signature: https://spisakt.github.io/RPN-signature/
+
+    Inputs:
+        X (array-like): Input data for pain sensitivity prediction
+        in_file (str): Path to the bold file that was used to create X
+
+    Outputs:
+        score (float): Predicted pain sensitivity score
+        out_file (str): Absolute path to the output CSV file containing the prediction result
+
+    Sinking:
+        CSV file containing the prediction result
+
+    """
+
+    def predict(X, in_file):
+        from PUMI.utils import rpn_model
+        import pandas as pd
+        import PUMI
+        import os
+        import importlib
+
+        # load the packaged model while the path from the context manager is still valid
+        with importlib.resources.path('resources', 'model_rpn.json') as model_json:
+            model = rpn_model(file=model_json)
+
+        predicted = model.predict(X)
+
+        path = os.path.abspath('rpn-prediction.csv')
+        df = pd.DataFrame()
+        df['in_file'] = [in_file]
+        df['RPN'] = predicted
+        df.to_csv(path, index=False)
+        return predicted, path
+
+    predict_wf = Node(
+        utility.Function(
+            input_names=['X', 'in_file'],
+            output_names=['score', 'out_file'],
+            function=predict
+        ),
+        name="predict_wf"
+    )
+    wf.connect('inputspec', 'X', predict_wf, 'X')
+    wf.connect('inputspec', 'in_file', predict_wf, 'in_file')
+
+    wf.connect(predict_wf, 'score', 'outputspec', 'score')
+    wf.connect(predict_wf, 'out_file', 'outputspec', 'out_file')
+    wf.connect(predict_wf, 'out_file', 'sinker', 'rpn')
+
+
+@FuncPipeline(inputspec_fields=['X', 'in_file'],
+              outputspec_fields=['score', 'out_file'])
+def predict_pain_sensitivity_rcpl(wf, model_path=None, **kwargs):
+    """
+
+    Perform pain sensitivity prediction using the RCPL signature
+    (Resting-state functional Connectivity signature of Pain-related Learning).
+    Further information regarding the signature: https://github.com/kincsesbalint/paintone_rsn
+
+    Parameters:
+        model_path (str, optional): Path to the pre-trained model relative to PUMI's data_in folder.
+                                    If set to None, PUMI's built-in RCPL model is used.
+
+    Inputs:
+        X (array-like): Input data for pain sensitivity prediction
+        in_file (str): Path to the bold file that was used to create X
+
+    Outputs:
+        score (float): Predicted pain sensitivity score
+        out_file (str): Absolute path to the output CSV file containing the prediction result
+
+    Sinking:
+        CSV file containing the prediction result
+
+    """
+
+    def predict(X, in_file, model_path):
+        import pandas as pd
+        import os
+        import PUMI
+        import joblib
+        import importlib
+
+        if model_path is None:
+            with importlib.resources.path('resources', 'rcpl_model.sav') as file:
+                model = joblib.load(file)
+        else:
+            data_in_folder = os.path.join(os.path.dirname(os.path.abspath(PUMI.__file__)), '..', 'data_in')
+            model_path = os.path.join(data_in_folder, model_path)
+            if not os.path.exists(model_path):
+                raise FileNotFoundError(f"Model file not found: {model_path}")
+
+            model = joblib.load(model_path)
+
+        predicted = model.predict(X)
+
+        path = os.path.abspath('rcpl-prediction.csv')
+        df = pd.DataFrame()
+        df['in_file'] = [in_file]
+        df['RCPL'] = predicted
+        df.to_csv(path, index=False)
+
+        return predicted, path
+
+    predict_wf = Node(
+        utility.Function(
+            input_names=['X', 'in_file', 'model_path'],
+            output_names=['score', 'out_file'],
+            function=predict
+        ),
+        name="predict_wf"
+    )
+    wf.connect('inputspec', 'X', predict_wf, 'X')
+    wf.connect('inputspec', 'in_file', predict_wf, 'in_file')
+    predict_wf.inputs.model_path = model_path
+
+    wf.connect(predict_wf, 'score', 'outputspec', 'score')
+    wf.connect(predict_wf, 'out_file', 'outputspec', 'out_file')
+    wf.connect(predict_wf, 'out_file', 'sinker', 'rcpl')
+
+
+@FuncPipeline(inputspec_fields=['rpn_out_file', 'rcpl_out_file'],
+              outputspec_fields=['out_file'])
+def collect_pain_predictions(wf, **kwargs):
+    """
+
+    Merge the out_file results of the RPN and RCPL pain sensitivity predictions into one file
+
+    Inputs:
+        rpn_out_file (str): Path to the out_file generated by the RPN method
+        rcpl_out_file (str): Path to the out_file generated by the RCPL method
+
+    Outputs:
+        out_file (str): Absolute path to the output CSV file containing the RPN and RCPL predictions.
+ + Sinking: + CSV file containing RPN and RCPL predictions + + """ + + def merge_predictions(rpn_out_file, rcpl_out_file): + import pandas as pd + import os + + df_rpn = pd.read_csv(rpn_out_file) + df_rcpl = pd.read_csv(rcpl_out_file) + + # Check if in_file columns are the same + if df_rpn['in_file'].iloc[0] != df_rcpl['in_file'].iloc[0]: + raise ValueError("The 'in_file' columns in the two CSV files are not the same!") + + merged_df = pd.DataFrame() + merged_df['in_file'] = df_rpn['in_file'] + merged_df['RPN'] = df_rpn['RPN'] + merged_df['RCPL'] = df_rcpl['RCPL'] + + path = os.path.abspath('pain-sensitivity-predictions.csv') + merged_df.to_csv(path, index=False) + + return path + + merge_predictions_wf = Node( + utility.Function( + input_names=['rpn_out_file', 'rcpl_out_file'], + output_names=['out_file'], + function=merge_predictions + ), + name="merge_predictions_wf" + ) + wf.connect('inputspec', 'rpn_out_file', merge_predictions_wf, 'rpn_out_file') + wf.connect('inputspec', 'rcpl_out_file', merge_predictions_wf, 'rcpl_out_file') + + wf.connect(merge_predictions_wf, 'out_file', 'outputspec', 'out_file') + wf.connect(merge_predictions_wf, 'out_file', 'sinker', 'pain_predictions') + + +@BidsPipeline(output_query={ + 'T1w': dict( + datatype='anat', + suffix='T1w', + extension=['nii', 'nii.gz'] + ), + 'bold_lr': dict( + datatype='func', + suffix='bold', + acquisition='LR', + extension=['nii', 'nii.gz'] + ) +}) +def hcp(wf, bbr=True, **kwargs): + """ + The HCP pipeline is the RCPL pipeline but with different input. + CAUTION: This pipeline assumes that you converted the HCP dataset into the BIDS format! + """ + + print('* bbr:', bbr) + + reorient_struct_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_struct_wf") + wf.connect('inputspec', 'T1w', reorient_struct_wf, 'in_file') + + reorient_func_lr_wf = Node(Reorient2Std(output_type='NIFTI_GZ'), name="reorient_func_lr_wf") + wf.connect('inputspec', 'bold_lr', reorient_func_lr_wf, 'in_file') + + anatomical_preprocessing_wf = anat_proc(name='anatomical_preprocessing_wf', bet_tool='deepbet') + wf.connect(reorient_struct_wf, 'out_file', anatomical_preprocessing_wf, 'in_file') + + func2anat_wf = func2anat(name='func2anat_wf', bbr=bbr) + wf.connect('inputspec', 'bold_lr', func2anat_wf, 'func') + wf.connect(anatomical_preprocessing_wf, 'brain', func2anat_wf, 'head') + wf.connect(anatomical_preprocessing_wf, 'probmap_wm', func2anat_wf, 'anat_wm_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_csf', func2anat_wf, 'anat_csf_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_gm', func2anat_wf, 'anat_gm_segmentation') + wf.connect(anatomical_preprocessing_wf, 'probmap_ventricle', func2anat_wf, 'anat_ventricle_segmentation') + + compcor_roi_wf = anat_noise_roi('compcor_roi_wf') + wf.connect(func2anat_wf, 'wm_mask_in_funcspace', compcor_roi_wf, 'wm_mask') + wf.connect(func2anat_wf, 'ventricle_mask_in_funcspace', compcor_roi_wf, 'ventricle_mask') + + func_proc_wf = func_proc_despike_afni('func_proc_wf', bet_tool='deepbet', deepbet_n_dilate=2) + wf.connect('inputspec', 'bold_lr', func_proc_wf, 'func') + wf.connect(compcor_roi_wf, 'out_file', func_proc_wf, 'cc_noise_roi') + + pick_atlas_wf = mist_atlas('pick_atlas_wf') + mist_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data_in/atlas/MIST")) + pick_atlas_wf.get_node('inputspec').inputs.labelmap = os.path.join(mist_dir, 'Parcellations/MIST_122.nii.gz') + pick_atlas_wf.get_node('inputspec').inputs.modules = mist_modules(mist_directory=mist_dir, 
resolution="122") + pick_atlas_wf.get_node('inputspec').inputs.labels = mist_labels(mist_directory=mist_dir, resolution="122") + + extract_timeseries = extract_timeseries_nativespace('extract_timeseries') + wf.connect(pick_atlas_wf, 'relabeled_atlas', extract_timeseries, 'atlas') + wf.connect(pick_atlas_wf, 'reordered_labels', extract_timeseries, 'labels') + wf.connect(pick_atlas_wf, 'reordered_modules', extract_timeseries, 'modules') + wf.connect(anatomical_preprocessing_wf, 'brain', extract_timeseries, 'anat') + wf.connect(func2anat_wf, 'anat_to_func_linear_xfm', extract_timeseries, 'inv_linear_reg_mtrx') + wf.connect(anatomical_preprocessing_wf, 'mni2anat_warpfield', extract_timeseries, 'inv_nonlinear_reg_mtrx') + wf.connect(func2anat_wf, 'gm_mask_in_funcspace', extract_timeseries, 'gm_mask') + wf.connect(func_proc_wf, 'func_preprocessed', extract_timeseries, 'func') + wf.connect(func_proc_wf, 'FD', extract_timeseries, 'confounds') + + func2std = func2standard('func2std') + wf.connect(anatomical_preprocessing_wf, 'brain', func2std, 'anat') + wf.connect(func2anat_wf, 'func_to_anat_linear_xfm', func2std, 'linear_reg_mtrx') + wf.connect(anatomical_preprocessing_wf, 'anat2mni_warpfield', func2std, 'nonlinear_reg_mtrx') + wf.connect(anatomical_preprocessing_wf, 'std_template', func2std, 'reference_brain') + wf.connect(func_proc_wf, 'func_preprocessed', func2std, 'func') + wf.connect(func_proc_wf, 'mc_ref_vol', func2std, 'bbr2ants_source_file') + + calculate_connectivity_wf = calculate_connectivity('calculate_connectivity_wf') + wf.connect(extract_timeseries, 'timeseries', calculate_connectivity_wf, 'ts_files') + wf.connect(func_proc_wf, 'FD', calculate_connectivity_wf, 'fd_files') + + predict_pain_sensitivity_rpn_wf = predict_pain_sensitivity_rpn('predict_pain_sensitivity_rpn_wf') + wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rpn_wf, 'X') + wf.connect('inputspec', 'bold_lr', predict_pain_sensitivity_rpn_wf, 'in_file') + + predict_pain_sensitivity_rcpl_wf = predict_pain_sensitivity_rcpl('predict_pain_sensitivity_rcpl_wf') + wf.connect(calculate_connectivity_wf, 'features', predict_pain_sensitivity_rcpl_wf, 'X') + wf.connect('inputspec', 'bold_lr', predict_pain_sensitivity_rcpl_wf, 'in_file') + + collect_pain_predictions_wf = collect_pain_predictions('collect_pain_predictions_wf') + wf.connect(predict_pain_sensitivity_rpn_wf, 'out_file', collect_pain_predictions_wf, 'rpn_out_file') + wf.connect(predict_pain_sensitivity_rcpl_wf, 'out_file', collect_pain_predictions_wf, 'rcpl_out_file') + + wf.write_graph('HCP-pipeline.png') + save_software_versions(wf) + + +hcp_app = BidsApp( + pipeline=hcp, + name='hcp' +) +hcp_app.parser.add_argument( + '--bbr', + default='yes', + type=lambda x: (str(x).lower() in ['true', '1', 'yes']), + help="Use BBR registration: yes/no (default: yes)" +) + +hcp_app.run() diff --git a/scripts/check_logs.py b/scripts/check_logs.py new file mode 100644 index 0000000..059c893 --- /dev/null +++ b/scripts/check_logs.py @@ -0,0 +1,38 @@ +import argparse +from pathlib import Path + + +if __name__ == '__main__': + # Create CLI argument + parser = argparse.ArgumentParser() + parser.add_argument('--logs_dir', help='Directory containing the slurm logs', required=True) + + # Parse CLI logs dir and check if it really exists + args = parser.parse_args() + logs_dir = Path(args.logs_dir) + if not logs_dir.exists(): + raise ValueError('Log directory does not exist!') + + log_files = logs_dir.glob('**/sub-*.out', ) # Collect all the log files + failed_subjects 
= [] + failed_on_node = [] + + for log_file in log_files: + with open(log_file, 'r') as log_file_obj: + lines = log_file_obj.readlines() + loi = lines[-2] # line of interest. Should start with 'Ended on c[...]' + if loi.startswith('Ended on c'): + continue + else: + first_split = lines[1].split('.')[0] # 'Starting on c[Number]' + node_start_idx = first_split.rfind('c') + node = first_split[node_start_idx:] # e.g., 'c78' + failed_on_node.append(node) + failed_subjects.append( + (log_file.stem, node) + ) + print('The following subject(s) crashed:') + for subject, node in failed_subjects: + print(f'{subject} on {node}') + failed = set(failed_on_node) + print(f'Please clean up manually on the following nodes: {failed}') diff --git a/scripts/cluster-pipeline-executor.sh b/scripts/cluster-pipeline-executor.sh index 75d798f..1d93fb4 100644 --- a/scripts/cluster-pipeline-executor.sh +++ b/scripts/cluster-pipeline-executor.sh @@ -12,7 +12,7 @@ CPUS_PER_TASK=15 ################################################################ -while getopts 'i:o:t:l:p:r:m:n:b:d:c:h' opt; do +while getopts 'i:o:t:l:p:r:m:n:b:d:c:s:h' opt; do case "$opt" in i) INDIR="$OPTARG";; o) OUTDIR="$OPTARG";; @@ -25,11 +25,12 @@ while getopts 'i:o:t:l:p:r:m:n:b:d:c:h' opt; do b) BRANCH="$OPTARG";; d) SUBMIT_DELAY="$OPTARG";; c) CPUS_PER_TASK="$OPTARG";; + s) SIF_PATH="$OPTARG";; ?|h) echo "-i Input BIDS dataset" echo "-o Derivatives dir (i.e., where to store the results)" - echo "-t Where to store temporary PUMI workflow files on the cluster (MUST BE SOMEWHERE IN /tmp !)" + echo "-t Where to store temporary PUMI workflow files on the worker nodes (MUST BE an absolute path somewhere in /local/)" echo "-l NFS directory that should be used to store the Slurm log files (+ Apptainer SIF file)" echo "-p PUMI pipeline you want to run (default: '${PIPELINE}')" echo "-r Nipype plugin params to limit resource usage (default: '${RESOURCES}')" @@ -38,6 +39,7 @@ while getopts 'i:o:t:l:p:r:m:n:b:d:c:h' opt; do echo "-b Which PUMI GitHub branch to install (default: '${BRANCH}')" echo "-d Minimum delay between submission of jobs in seconds (default: '${SUBMIT_DELAY}')" echo "-c CPU's per task (default: '${CPUS_PER_TASK}')" + echo "-s Path to Image SIF file. If not provided, pull and convert latest docker image." exit 1 ;; esac @@ -55,6 +57,7 @@ echo "Max Slurm jobs of user (-m): ${MAX_JOBS}" echo "Slurm nice value (-n): ${NICE}" echo "PUMI branch to use (-b): ${BRANCH}" echo "Minimum delay between submission of jobs in seconds (-d): ${SUBMIT_DELAY}" +echo "Path to Image SIF file (-s): ${SIF_PATH}" echo "--------------------------------------------------------------------" # Validation to ensure mandatory options are provided. @@ -65,6 +68,15 @@ if [ -z "${INDIR}" ] || [ -z "${OUTDIR}" ] || [ -z "${TMP_PUMI}" ] || [ -z "${LO exit 1 fi +if [[ "${TMP_PUMI}" == /local/* ]]; +then + echo "-t was specified to a sub-directory in /local. Great!" 
+else + echo "Error: -t must be set to a sub-directory in /local/" + exit 1 + +fi + ############################# Main script begins here ######################################### dataset_name=$(basename "$INDIR") @@ -72,13 +84,17 @@ mkdir -p "jobs_scripts/${dataset_name}" # Create directory which will contain t mkdir -p "${LOG_PATH}" # Create directory where the jobs will store the slurm outputs in -# Pull (and convert) PUMI image locally and then copy to NFS share where all the jobs can copy it from -rm -rf ${TMP_PUMI}/apptainer_cache/ -mkdir -p ${TMP_PUMI}/apptainer_cache/ -APPTAINER_CACHEDIR=${TMP_PUMI}/apptainer_cache/ \ - apptainer pull ${TMP_PUMI}/PUMI.sif docker://pnilab/pumi:latest -cp ${TMP_PUMI}/PUMI.sif ${LOG_PATH}/PUMI.sif -rm -rf ${TMP_PUMI} +if [ -z "${SIF_PATH}" ]; then + rm -rf ${TMP_PUMI}/apptainer_cache/ + mkdir -p ${TMP_PUMI}/apptainer_cache/ + # Pull (and convert) PUMI image locally and then copy to NFS share where all the jobs can copy it from + APPTAINER_CACHEDIR=${TMP_PUMI}/apptainer_cache/ apptainer pull ${TMP_PUMI}/PUMI.sif docker://pnilab/pumi:latest + cp ${TMP_PUMI}/PUMI.sif ${LOG_PATH}/PUMI.sif + rm -rf ${TMP_PUMI} + SIF_PATH=${LOG_PATH}/PUMI.sif +else + echo "SIF was already provided. No pulling and conversion needed." +fi mkdir -p "${OUTDIR}" @@ -102,36 +118,40 @@ echo "*************************************************************" echo "Starting on \$(hostname) at \$(date +"%T")" echo "*************************************************************" -subject_data_in="${TMP_PUMI}/input/${subject_id}" # Create temporary directory which stores BIDS data for one subject +subject_dir="${TMP_PUMI}/${subject_id}/" + +subject_data_in="\${subject_dir}/input/" # Create temporary directory which stores BIDS data for one subject rm -rf "\${subject_data_in}" mkdir -p "\${subject_data_in}" -subject_data_out="${TMP_PUMI}/output/${subject_id}" # Create temporary directory which stores derivatives for one subject +subject_data_out="\${subject_dir}/output/" # Create temporary directory which stores derivatives for one subject rm -rf "\${subject_data_out}" mkdir -p "\${subject_data_out}" -subject_tmp="${TMP_PUMI}/tmp/${subject_id}" # Create temporary directory which stores derivatives for one subject +subject_tmp="\${subject_dir}/tmp/" # Create temporary directory which stores derivatives for one subject rm -rf "\${subject_tmp}" mkdir -p "\${subject_tmp}" cp -vr "${subject_folder}" "\${subject_data_in}" cp -v "${dataset_description_path}" "\${subject_data_in}" # Every valid BIDS dataset must contain description (otherwise Nipype raises BIDSValidationError) -pumi_dir=${TMP_PUMI}/PUMI/${subject_id}/ +pumi_dir="\${subject_dir}/PUMI/" rm -rf \${pumi_dir} mkdir -p \${pumi_dir} # Create folder in which we clone PUMI into (and parent folders if necessary) -mkdir -p ${TMP_PUMI}/apptainer_image/${subject_id}/ -cp ${LOG_PATH}/PUMI.sif ${TMP_PUMI}/apptainer_image/${subject_id}/PUMI.sif +apptainer_image_dir="\${subject_dir}/apptainer_image/" +apptainer_image="\${apptainer_image_dir}/PUMI.sif" +mkdir -p "\${apptainer_image_dir}" +cp ${SIF_PATH} "\${apptainer_image}" -subject_apptainer_cache_dir=${TMP_PUMI}/apptainer_cache/${subject_id}/ +subject_apptainer_cache_dir="\${subject_dir}/apptainer_app_cache/" rm -rf \${subject_apptainer_cache_dir} mkdir -p \${subject_apptainer_cache_dir} APPTAINER_CACHEDIR=\${subject_apptainer_cache_dir} \ apptainer exec \ --writable-tmpfs \ -${TMP_PUMI}/apptainer_image/${subject_id}/PUMI.sif \ +\${apptainer_image} \ bash -c " \ set -x; \ git clone -b ${BRANCH} 
https://github.com/pni-lab/PUMI \${pumi_dir}; \ @@ -156,10 +176,7 @@ cp -vr \${subject_data_out}/* ${OUTDIR}/ # Remove (most) files from cluster -rm -rf "\${subject_data_in}" -rm -rf "\${subject_data_out}" -rm -rf "\${subject_tmp}" -rm -rf "\${subject_apptainer_cache_dir}" +rm -rf "\${subject_dir}" echo "*************************************************************" echo "Ended on \$(hostname) at \$(date +"%T")"
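
Note on the new trt default in fieldmap_correction(): the docstring derives it from the HCP S1200 acquisition parameters. Below is a minimal sketch of that arithmetic and of the values the pipeline hands to FSL TOPUP; all numbers come from the docstring above, and the variable names are illustrative only, not part of the patch:

    # Total readout time (TRT) for the HCP S1200 rsfMRI runs:
    # 0.001 * echo spacing [ms] * EPI factor, per the fieldmap_correction() docstring.
    echo_spacing_ms = 0.58   # effective echo spacing in milliseconds
    epi_factor = 90          # number of phase-encode steps

    trt = 1e-3 * echo_spacing_ms * epi_factor
    assert abs(trt - 0.0522) < 1e-9  # matches the new default trt=[0.0522, 0.0522]

    # fieldmap_correction() forwards these to TOPUP ('x-'/'x' = LR/RL phase encoding):
    encoding_direction = ['x-', 'x']
    readout_times = [trt, trt]  # identical for both runs
    print(encoding_direction, readout_times)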
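
Likewise, the behaviour of the new get_cut_coords() helper can be sanity-checked outside the workflow. The y-extent of 109 voxels here is an assumed example shape; the helper reads the real one via nibabel:

    import numpy as np

    # Mirror of get_cut_coords(): n_slices evenly spaced coronal cut coordinates
    # spanning the y-extent of the image.
    y_dim = 109      # e.g., func_img.shape[1]
    n_slices = 10
    cut_coords = np.linspace(-y_dim / 2, y_dim / 2, n_slices)
    print(cut_coords)  # floats are fine; nilearn rounds cut coordinates internally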