#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ======================================================================
# SCRIPT INFORMATION:
# ======================================================================
# SCRIPT: FIRST LEVEL GLM
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ACKNOWLEDGEMENTS: THANKS TO HRVOJE STOJIC (UCL) FOR HELP
# ======================================================================
# IMPORT RELEVANT PACKAGES
# ======================================================================
# import basic libraries:
import os
import sys
import warnings
from os.path import join as opj
# import nipype libraries:
from nipype.interfaces.utility import Function, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.utils.profiler import log_nodes_cb
from nipype import config, logging
# import spm and matlab interfaces:
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.spm.model import (
    Level1Design, EstimateModel, EstimateContrast, ThresholdStatistics,
    Threshold)
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces import spm
# import fsl interfaces:
from nipype.workflows.fmri.fsl import create_susan_smooth
from nipype.interfaces.fsl.utils import ExtractROI
# import libraries for bids interaction:
from bids.layout import BIDSLayout
# import freesurfer interfaces:
# import custom functions:
from highspeed_glm_functions import (
    get_subject_info, plot_stat_maps, leave_one_out)
import datalad.api as dl
# ======================================================================
# ENVIRONMENT SETTINGS (DEALING WITH ERRORS AND WARNINGS):
# ======================================================================
# set the fsl output type environment variable:
os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
# deal with nipype-related warnings:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# inhibit CTF lock
os.environ['MCR_INHIBIT_CTF_LOCK'] = '1'
# filter out warnings related to the numpy package:
warnings.filterwarnings("ignore", message="numpy.dtype size changed*")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed*")
# ======================================================================
# SET PATHS AND SUBJECTS
# ======================================================================
# define paths depending on the operating system (OS) platform:
project = 'highspeed'
# initialize empty paths:
path_root = None
sub_list = None
# path to the project root:
project_name = 'highspeed-glm'
path_root = os.getcwd().split(project_name)[0] + project_name
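# note: this assumes the script is run from somewhere inside the project
# directory; e.g. (illustration), a working directory of
# '/home/user/highspeed-glm/code/glm' yields a path_root of
# '/home/user/highspeed-glm'.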
if 'darwin' in sys.platform:
    path_spm = '/Users/Shared/spm12'
    path_matlab = '/Applications/MATLAB_R2017a.app/bin/matlab -nodesktop -nosplash'
    # set paths for spm:
    spm.SPMCommand.set_mlab_paths(paths=path_spm, matlab_cmd=path_matlab)
    MatlabCommand.set_default_paths(path_spm)
    MatlabCommand.set_default_matlab_cmd(path_matlab)
    sub_list = ['sub-01']
elif 'linux' in sys.platform:
    # path_matlab = '/home/mpib/wittkuhn/spm12.simg eval \$SPMMCRCMD'
    # path_matlab = opj('/home', 'beegfs', 'wittkuhn', 'tools', 'spm', 'spm12.simg eval \$SPMMCRCMD')
    singularity_cmd = 'singularity run -B /home/mpib/wittkuhn -B /mnt/beegfs/home/wittkuhn /home/mpib/wittkuhn/highspeed/highspeed-glm/tools/spm/spm12.simg'
    singularity_spm = 'eval \$SPMMCRCMD'
    path_matlab = ' '.join([singularity_cmd, singularity_spm])
    spm.SPMCommand.set_mlab_paths(matlab_cmd=path_matlab, use_mcr=True)
# grab the list of subjects from the bids data set:
layout = BIDSLayout(opj(path_root, 'bids'))
# get all subject ids:
sub_list = sorted(layout.get_subjects())
# create a template to add the "sub-" prefix to the ids:
sub_template = ['sub-'] * len(sub_list)
# add the prefix to all ids:
sub_list = ["%s%s" % t for t in zip(sub_template, sub_list)]
# if the user passed start and stop indices, restrict the subject list:
sub_list = sub_list[int(sys.argv[1]):int(sys.argv[2])]
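# usage example (assumption about how this script is invoked): running
# `python highspeed-glm-main.py 0 1` would restrict the analysis to the
# first subject of the sorted list, `python highspeed-glm-main.py 0 4`
# to the first four subjects, and so on.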
# print the SPM version:
print('using SPM version %s' % spm.SPMCommand().version)
# ======================================================================
# DEFINE PBS CLUSTER JOB TEMPLATE (NEEDED WHEN RUNNING ON THE CLUSTER):
# ======================================================================
job_template = """
#PBS -l walltime=10:00:00
#PBS -j oe
#PBS -o /home/mpib/wittkuhn/highspeed/highspeed-glm/logs/glm
#PBS -m n
#PBS -v FSLOUTPUTTYPE=NIFTI_GZ
source /etc/bash_completion.d/virtualenvwrapper
workon highspeed-glm
module load fsl/5.0
module load matlab/R2017b
module load freesurfer/6.0.0
"""
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
#path_log = opj(path_root, 'logs', 'l1analysis')
# enable debug mode for logging and configuration:
#config.enable_debug_mode()
# enable logging to file and provide path to the logging file:
#config.update_config({'logging': {'log_directory': path_log,
#                                  'log_to_file': True},
#                      'execution': {'stop_on_first_crash': False,
#                                    'keep_unnecessary_outputs': 'false'},
#                      'monitoring': {'enabled': True}
#                      })
# update the global logging settings:
# logging.update_logging(config)
# callback_log_path = opj(path_log, 'ressources.log')
# logger = logging.getLogger('callback')
# logger.setLevel(logging.DEBUG)
# handler = logging.FileHandler(callback_log_path)
# logger.addHandler(handler)
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
#path_log = opj(path_root, 'logs', 'l1analysis')
## create directory to save the log files if it does not exist yet:
#if not os.path.exists(path_log):
#    os.makedirs(path_log)
## configure logging:
#logging.basicConfig(
#    filename=opj(path_log, 'log_l1analysis.log'),
#    level=logging.DEBUG,
#    filemode="a+",
#    format='%(asctime)s - %(levelname)s - %(message)s',
#    datefmt='%d/%m/%Y %H:%M:%S')
#logging.getLogger().addHandler(logging.StreamHandler())
# ======================================================================
# LOG INFORMATION ABOUT THE EXECUTION
# ======================================================================
# print the loaded SPM version:
#analysis_name = "Level 1 GLM analysis"
#logging.info("--------------------------------------------------------")
#logging.info("Analysis: " + analysis_name)
#logging.info("SPM version: " + (spm.SPMCommand().version))
#logging.info("List of subjects:")
#logging.info(sub_list)
# ======================================================================
# DEFINE SETTINGS
# ======================================================================
# time of repetition, in seconds:
time_repetition = 1.25
# total number of runs:
num_runs = 8
# smoothing kernel, in mm:
fwhm = 4
# number of dummy volumes to remove from the start of each run:
num_dummy = 0
# ======================================================================
# DEFINE NODE: INFOSOURCE
# ======================================================================
# define the infosource node that collects the data:
infosource = Node(IdentityInterface(
    fields=['subject_id']), name='infosource')
# let the node iterate (parallelize) over all subjects:
infosource.iterables = [('subject_id', sub_list)]
# ======================================================================
# DEFINE SELECTFILES NODE
# ======================================================================
path_confounds = opj(
    path_root, 'fmriprep', '*', '*',
    'func', '*highspeed*confounds_regressors.tsv')
path_events = opj(
    path_root, 'bids', '*', '*', 'func', '*events.tsv')
path_func = opj(
    path_root, 'fmriprep', '*', '*',
    'func', '*highspeed*space-T1w*preproc_bold.nii.gz')
path_anat = opj(
    path_root, 'fmriprep', '*',
    'anat', '*_desc-preproc_T1w.nii.gz')
path_wholemask = opj(
    path_root, 'fmriprep', '*', '*',
    'func', '*highspeed*space-T1w*brain_mask.nii.gz')
# retrieve the input data via datalad (if not yet locally available):
dl.get(path_confounds)
dl.get(path_events)
dl.get(path_func)
dl.get(path_anat)
dl.get(path_wholemask)
# define all relevant file paths:
templates = dict(
    confounds=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*confounds_regressors.tsv'),
    events=opj(path_root, 'bids', '{subject_id}', '*', 'func',
               '*events.tsv'),
    func=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}', '*',
             'func', '*highspeed*space-T1w*preproc_bold.nii.gz'),
    anat=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
             'anat', '{subject_id}_desc-preproc_T1w.nii.gz'),
    wholemask=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*space-T1w*brain_mask.nii.gz'),
)
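# note: the '{subject_id}' placeholders are filled in by the subject_id
# coming from the infosource node (connected below); e.g. (illustration),
# for subject_id = 'sub-01' the 'events' template resolves to
# <path_root>/bids/sub-01/*/func/*events.tsv.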
# define the selectfiles node:
selectfiles = Node(SelectFiles(templates, sort_filelist=True),
                   name='selectfiles')
# set expected thread and memory usage for the node:
selectfiles.interface.num_threads = 1
selectfiles.interface.mem_gb = 0.1
# selectfiles.inputs.subject_id = 'sub-20'
# selectfiles_results = selectfiles.run()
# ======================================================================
# DEFINE CREATE_SUSAN_SMOOTH WORKFLOW NODE
# ======================================================================
# define the susan smoothing node and specify the smoothing fwhm:
susan = create_susan_smooth()
# set the smoothing kernel:
susan.inputs.inputnode.fwhm = fwhm
# set expected thread and memory usage for the nodes:
susan.get_node('inputnode').interface.num_threads = 1
susan.get_node('inputnode').interface.mem_gb = 0.1
susan.get_node('median').interface.num_threads = 1
susan.get_node('median').interface.mem_gb = 3
susan.get_node('mask').interface.num_threads = 1
susan.get_node('mask').interface.mem_gb = 3
susan.get_node('meanfunc2').interface.num_threads = 1
susan.get_node('meanfunc2').interface.mem_gb = 3
susan.get_node('merge').interface.num_threads = 1
susan.get_node('merge').interface.mem_gb = 3
susan.get_node('multi_inputs').interface.num_threads = 1
susan.get_node('multi_inputs').interface.mem_gb = 3
susan.get_node('smooth').interface.num_threads = 1
susan.get_node('smooth').interface.mem_gb = 3
susan.get_node('outputnode').interface.num_threads = 1
susan.get_node('outputnode').interface.mem_gb = 0.1
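# note: create_susan_smooth() returns a small FSL SUSAN smoothing
# workflow whose 'inputnode' expects in_files, fwhm and mask_file and
# whose 'outputnode' exposes the smoothed_files; these fields are wired
# up in the connection section below.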
# ======================================================================
# DEFINE NODE: FUNCTION TO GET THE SUBJECT-SPECIFIC INFORMATION
# ======================================================================
subject_info = MapNode(Function(
    input_names=['events', 'confounds'],
    output_names=['subject_info', 'event_names'],
    function=get_subject_info),
    name='subject_info', iterfield=['events', 'confounds'])
# set expected thread and memory usage for the node:
subject_info.interface.num_threads = 1
subject_info.interface.mem_gb = 0.1
# subject_info.inputs.events = selectfiles_results.outputs.events
# subject_info.inputs.confounds = selectfiles_results.outputs.confounds
# subject_info_results = subject_info.run()
# subject_info_results.outputs.subject_info
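# note: get_subject_info() is a custom function defined in
# highspeed_glm_functions.py; judging from its input and output names, it
# presumably converts each run's events and confounds files into the
# subject_info structure (onsets, durations, nuisance regressors) and the
# list of event names that is later used to build the contrasts.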
# ======================================================================
# DEFINE NODE: REMOVE DUMMY VOLUMES (USING FSL ROI)
# ======================================================================
# function: extract region of interest (ROI) from an image
trim = MapNode(ExtractROI(), name='trim', iterfield=['in_file'])
# define index of the first selected volume (i.e., minimum index):
trim.inputs.t_min = num_dummy
# define the number of volumes selected starting at the minimum index:
trim.inputs.t_size = -1
# define the fsl output type:
trim.inputs.output_type = 'NIFTI'
# set expected thread and memory usage for the node:
trim.interface.num_threads = 1
trim.interface.mem_gb = 3
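# note: with num_dummy = 0 and t_size = -1, ExtractROI keeps the complete
# time series of every run; increasing num_dummy would discard that many
# initial (dummy) volumes from the start of each run.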
# ======================================================================
# DEFINE NODE: LEAVE-ONE-RUN-OUT SELECTION OF DATA
# ======================================================================
leave_one_run_out = Node(Function(
    input_names=['subject_info', 'event_names', 'data_func', 'run'],
    output_names=['subject_info', 'data_func', 'contrasts'],
    function=leave_one_out),
    name='leave_one_run_out')
# define the run index as an iterable (one iteration per run):
leave_one_run_out.iterables = ('run', range(num_runs))
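# note: leave_one_out() is a custom function defined in
# highspeed_glm_functions.py; it presumably subsets the subject_info and
# the smoothed functional runs for the current value of 'run' and returns
# the matching contrast definitions, so that a separate model is set up
# for every iteration over the 8 runs.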
# ======================================================================
# DEFINE NODE: SPECIFY SPM MODEL (GENERATE SPM-SPECIFIC MODEL)
# ======================================================================
# function: makes a model specification compatible with spm designers
# adds SPM specific options to SpecifyModel
l1model = Node(SpecifySPMModel(), name="l1model")
# input: concatenate runs to a single session (boolean, default: False):
l1model.inputs.concatenate_runs = False
# input: units of event onsets and durations (secs or scans):
l1model.inputs.input_units = 'secs'
# input: units of design event onsets and durations (secs or scans):
l1model.inputs.output_units = 'secs'
# input: time of repetition (a float):
l1model.inputs.time_repetition = time_repetition
# high-pass filter cutoff in secs (a float, default = 128 secs):
l1model.inputs.high_pass_filter_cutoff = 128
# ======================================================================
# DEFINE NODE: LEVEL 1 DESIGN (GENERATE AN SPM DESIGN MATRIX)
# ======================================================================
# function: generate an SPM design matrix
l1design = Node(Level1Design(), name="l1design")
# input: (a dictionary with keys which are 'hrf' or 'fourier' or
# 'fourier_han' or 'gamma' or 'fir' and with values which are any value)
l1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
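# note: 'derivs': [0, 0] selects the canonical HRF without temporal or
# dispersion derivatives; [1, 0] would add the temporal derivative and
# [1, 1] would add both derivatives.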
# input: units for specification of onsets ('secs' or 'scans'):
l1design.inputs.timing_units = 'secs'
# input: interscan interval / repetition time in secs (a float):
l1design.inputs.interscan_interval = time_repetition
# input: model serial correlations AR(1), FAST or none:
l1design.inputs.model_serial_correlations = 'AR(1)'
# input: number of time-bins per scan (an integer):
l1design.inputs.microtime_resolution = 16
# input: the onset/time-bin used for alignment (a float):
l1design.inputs.microtime_onset = 1
# set expected thread and memory usage for the node:
l1design.interface.num_threads = 1
l1design.interface.mem_gb = 2
# ======================================================================
# DEFINE NODE: ESTIMATE MODEL (ESTIMATE THE PARAMETERS OF THE MODEL)
# ======================================================================
# function: use spm_spm to estimate the parameters of a model
l1estimate = Node(EstimateModel(), name="l1estimate")
# input: (a dictionary with keys which are 'Classical' or 'Bayesian2'
# or 'Bayesian' and with values which are any value)
l1estimate.inputs.estimation_method = {'Classical': 1}
# set expected thread and memory usage for the node:
l1estimate.interface.num_threads = 1
l1estimate.interface.mem_gb = 2
# ======================================================================
# DEFINE NODE: ESTIMATE CONTRASTS (ESTIMATES THE CONTRASTS)
# ======================================================================
# function: use spm_contrasts to estimate contrasts of interest
l1contrasts = Node(EstimateContrast(), name="l1contrasts")
# input: list of contrasts with each contrast being a list of the form:
# [('name', 'stat', [condition list], [weight list], [session list])]:
# l1contrasts.inputs.contrasts = l1contrasts_list
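# illustration only (the actual contrasts are created by leave_one_out
# and connected to this node in the pipeline below); a single t-contrast
# could look like this:
# l1contrasts.inputs.contrasts = [('face > baseline', 'T', ['face'], [1])]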
# node input: overwrite previous results:
l1contrasts.overwrite = True
# set expected thread and memory usage for the node:
l1contrasts.interface.num_threads = 1
l1contrasts.interface.mem_gb = 1.5
# ======================================================================
# DEFINE NODE: FUNCTION TO PLOT CONTRASTS
# ======================================================================
plot_contrasts = MapNode(Function(
    input_names=['anat', 'stat_map', 'thresh'],
    output_names=['out_path'],
    function=plot_stat_maps),
    name='plot_contrasts', iterfield=['thresh'])
# input: plot data with set of different thresholds:
plot_contrasts.inputs.thresh = [None, 1, 2, 3]
# set expected thread and memory usage for the node:
plot_contrasts.interface.num_threads = 1
plot_contrasts.interface.mem_gb = 0.2
# ======================================================================
# DEFINE NODE: THRESHOLD
# ======================================================================
# function: Topological FDR thresholding based on cluster extent/size.
# Smoothness is estimated from GLM residuals but is assumed to be the
# same for all of the voxels.
thresh = Node(Threshold(), name="thresh")
# input: whether to use FWE (Bonferroni) correction for initial threshold
# (a boolean, nipype default value: True):
thresh.inputs.use_fwe_correction = False
# input: whether to use FDR over cluster extent probabilities (boolean)
thresh.inputs.use_topo_fdr = True
# input: value for initial thresholding (defining clusters):
thresh.inputs.height_threshold = 0.05
# input: is the cluster forming threshold a stat value or p-value?
# ('p-value' or 'stat', nipype default value: p-value):
thresh.inputs.height_threshold_type = 'p-value'
# input: which contrast in the SPM.mat to use (an integer):
thresh.inputs.contrast_index = 1
# input: p threshold on FDR corrected cluster size probabilities (float):
thresh.inputs.extent_fdr_p_threshold = 0.05
# input: minimum cluster size in voxels (an integer, default = 0):
thresh.inputs.extent_threshold = 0
# set expected thread and memory usage for the node:
thresh.interface.num_threads = 1
thresh.interface.mem_gb = 0.2
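# in short: clusters are formed at an uncorrected voxel-wise threshold of
# p < 0.05 and then assessed with a topological FDR correction on cluster
# extent at p < 0.05, with no minimum cluster size.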
# ======================================================================
# DEFINE NODE: THRESHOLD STATISTICS
# ======================================================================
# function: Given height and cluster size threshold calculate
# theoretical probabilities concerning false positives
thresh_stat = Node(ThresholdStatistics(), name="thresh_stat")
# input: which contrast in the SPM.mat to use (an integer):
thresh_stat.inputs.contrast_index = 1
# ======================================================================
# CREATE DATASINK NODE (OUTPUT STREAM):
# ======================================================================
# create a node of the function:
l1datasink = Node(DataSink(), name='datasink')
# assign the path to the base directory:
l1datasink.inputs.base_directory = opj(path_root, 'l1pipeline')
# create a list of substitutions to adjust the file paths of datasink:
substitutions = [('_subject_id_', '')]
# assign the substitutions to the datasink command:
l1datasink.inputs.substitutions = substitutions
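# note: this substitution strips the '_subject_id_' prefix that nipype
# prepends to iterable-specific output folders, e.g. (illustration)
# '_subject_id_sub-01' becomes 'sub-01' in the datasink output paths.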
# determine whether to store output in parameterized form:
l1datasink.inputs.parameterization = True
# set expected thread and memory usage for the node:
l1datasink.interface.num_threads = 1
l1datasink.interface.mem_gb = 0.2
# ======================================================================
# DEFINE THE LEVEL 1 ANALYSIS SUB-WORKFLOW AND CONNECT THE NODES:
# ======================================================================
# initiation of the 1st-level analysis workflow:
l1analysis = Workflow(name='l1analysis')
# connect the 1st-level analysis components
l1analysis.connect(l1model, 'session_info', l1design, 'session_info')
l1analysis.connect(l1design, 'spm_mat_file', l1estimate, 'spm_mat_file')
l1analysis.connect(l1estimate, 'spm_mat_file', l1contrasts, 'spm_mat_file')
l1analysis.connect(l1estimate, 'beta_images', l1contrasts, 'beta_images')
l1analysis.connect(l1estimate, 'residual_image', l1contrasts, 'residual_image')
# ======================================================================
# DEFINE META-WORKFLOW PIPELINE:
# ======================================================================
# initiation of the meta-workflow that wraps the l1analysis sub-workflow:
l1pipeline = Workflow(name='l1pipeline')
# stop execution of the workflow if an error is encountered:
l1pipeline.config = {'execution': {'stop_on_first_crash': True,
                                   'hash_method': 'timestamp'}}
# define the base directory of the workflow:
l1pipeline.base_dir = opj(path_root, 'work')
# ======================================================================
# ENABLE LOGGING:
# ======================================================================
# enable logging to file:
#config.enable_debug_mode()
#config.update_config({'logging': {'log_directory': os.getcwd(),
#                                  'log_to_file': True}})
#logging.update_logging(config)
# ======================================================================
# CONNECT WORKFLOW NODES:
# ======================================================================
# connect infosource to selectfiles node:
l1pipeline.connect(infosource, 'subject_id', selectfiles, 'subject_id')
# pass subject-specific events and confounds to the subject_info node:
l1pipeline.connect(selectfiles, 'events', subject_info, 'events')
l1pipeline.connect(selectfiles, 'confounds', subject_info, 'confounds')
# connect functional files to smoothing workflow:
l1pipeline.connect(selectfiles, 'func', susan, 'inputnode.in_files')
l1pipeline.connect(selectfiles, 'wholemask', susan, 'inputnode.mask_file')
l1pipeline.connect(susan, 'outputnode.smoothed_files', l1datasink, 'smooth')
# connect smoothed functional data to the trimming node:
l1pipeline.connect(susan, 'outputnode.smoothed_files', trim, 'in_file')
# ======================================================================
# INPUT AND OUTPUT STREAM FOR THE LEVEL 1 SPM ANALYSIS SUB-WORKFLOW:
# ======================================================================
# connect regressors to the subsetting node:
l1pipeline.connect(subject_info, 'subject_info', leave_one_run_out, 'subject_info')
# connect event_names to the subsetting node:
l1pipeline.connect(subject_info, 'event_names', leave_one_run_out, 'event_names')
# connect smoothed and trimmed data to subsetting node:
l1pipeline.connect(trim, 'roi_file', leave_one_run_out, 'data_func')
# connect regressors to the level 1 model specification node:
l1pipeline.connect(leave_one_run_out, 'subject_info', l1analysis, 'l1model.subject_info')
# connect smoothed and trimmed data to the level 1 model specification:
l1pipeline.connect(leave_one_run_out, 'data_func', l1analysis, 'l1model.functional_runs')
# connect l1 contrast specification to contrast estimation:
l1pipeline.connect(leave_one_run_out, 'contrasts', l1analysis, 'l1contrasts.contrasts')
# connect the anatomical image to the plotting node:
l1pipeline.connect(selectfiles, 'anat', plot_contrasts, 'anat')
# connect spm t-images to the plotting node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', plot_contrasts, 'stat_map')
# connect the t-images and spm mat file to the threshold node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', thresh, 'stat_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', thresh, 'spm_mat_file')
# connect all output results of the level 1 analysis to the datasink:
l1pipeline.connect(l1analysis, 'l1estimate.beta_images', l1datasink, 'estimates.@beta_images')
l1pipeline.connect(l1analysis, 'l1estimate.residual_image', l1datasink, 'estimates.@residual_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', l1datasink, 'contrasts.@spm_mat')
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', l1datasink, 'contrasts.@spmT')
l1pipeline.connect(l1analysis, 'l1contrasts.con_images', l1datasink, 'contrasts.@con')
l1pipeline.connect(plot_contrasts, 'out_path', l1datasink, 'contrasts.@out_path')
l1pipeline.connect(thresh, 'thresholded_map', l1datasink, 'thresh.@threshhold_map')
l1pipeline.connect(thresh, 'pre_topo_fdr_map', l1datasink, 'thresh.@pre_topo_fdr_map')
# ======================================================================
# WRITE GRAPH AND EXECUTE THE WORKFLOW
# ======================================================================
# write the graph:
l1pipeline.write_graph(graph2use='colored', simple_form=True)
# set the maximum resources the workflow can utilize:
# args_dict = {'status_callback' : log_nodes_cb}
# execute the workflow depending on the operating system:
if 'darwin' in sys.platform:
    # will execute the workflow using all available cpus:
    l1pipeline.run(plugin='MultiProc')
elif 'linux' in sys.platform:
    l1pipeline.run(plugin='PBS', plugin_args=dict(template=job_template))