#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ======================================================================
# SCRIPT INFORMATION:
# ======================================================================
# SCRIPT: FIRST LEVEL GLM
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ACKNOWLEDGEMENTS: THANKS TO HRVOJE STOJIC (UCL) FOR HELP
# ======================================================================
# IMPORT RELEVANT PACKAGES
# ======================================================================
# import basic libraries:
import os
import glob
import sys
import warnings
from os.path import join as opj
# import nipype libraries:
from nipype.interfaces.utility import Function, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.utils.profiler import log_nodes_cb
from nipype import config, logging
# import spm and matlab interfaces:
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.spm.model import (
    Level1Design, EstimateModel, EstimateContrast, ThresholdStatistics,
    Threshold)
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces import spm
# import fsl interfaces:
from nipype.workflows.fmri.fsl import create_susan_smooth
from nipype.interfaces.fsl.utils import ExtractROI
# import libraries for bids interaction:
from bids.layout import BIDSLayout
# import freesurfer interfaces:
# import custom functions:
from highspeed_glm_functions import (
    get_subject_info, plot_stat_maps, leave_one_out)
import datalad.api as dl
# ======================================================================
# ENVIRONMENT SETTINGS (DEALING WITH ERRORS AND WARNINGS):
# ======================================================================
# set the fsl output type environment variable:
os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
# deal with nipype-related warnings:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# inhibit CTF lock
os.environ['MCR_INHIBIT_CTF_LOCK'] = '1'
# filter out warnings related to the numpy package:
warnings.filterwarnings("ignore", message="numpy.dtype size changed*")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed*")
# ======================================================================
# SET PATHS AND SUBJECTS
# ======================================================================
# define paths depending on the operating system (OS) platform:
project = 'highspeed'
# initialize empty paths:
path_root = None
sub_list = None
# path to the project root:
project_name = 'highspeed-glm'
path_root = os.getenv('PWD').split(project_name)[0] + project_name
if 'darwin' in sys.platform:
    path_spm = '/Users/Shared/spm12'
    path_matlab = '/Applications/MATLAB_R2017a.app/bin/matlab -nodesktop -nosplash'
    # set paths for spm:
    spm.SPMCommand.set_mlab_paths(paths=path_spm, matlab_cmd=path_matlab)
    MatlabCommand.set_default_paths(path_spm)
    MatlabCommand.set_default_matlab_cmd(path_matlab)
    sub_list = ['sub-01']
elif 'linux' in sys.platform:
    # path_matlab = '/home/mpib/wittkuhn/spm12.simg eval \$SPMMCRCMD'
    # path_matlab = opj('/home', 'beegfs', 'wittkuhn', 'tools', 'spm', 'spm12.simg eval \$SPMMCRCMD')
    singularity_cmd = 'singularity run -B /home/mpib/wittkuhn -B /mnt/beegfs/home/wittkuhn /home/mpib/wittkuhn/highspeed/highspeed-glm/tools/spm/spm12.simg'
    singularity_spm = 'eval \$SPMMCRCMD'
    path_matlab = ' '.join([singularity_cmd, singularity_spm])
    spm.SPMCommand.set_mlab_paths(matlab_cmd=path_matlab, use_mcr=True)
    # grab the list of subjects from the bids data set:
    layout = BIDSLayout(opj(path_root, 'bids'))
    # get all subject ids:
    sub_list = sorted(layout.get_subjects())
    # create a template to add the "sub-" prefix to the ids:
    sub_template = ['sub-'] * len(sub_list)
    # add the prefix to all ids:
    sub_list = ["%s%s" % t for t in zip(sub_template, sub_list)]
    # select a subset of subjects according to the command line arguments:
    sub_list = sub_list[int(sys.argv[1]):int(sys.argv[2])]
# print the SPM version:
print('using SPM version %s' % spm.SPMCommand().version)
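# usage note (illustrative, assuming the script is called from the project
# root): the two command line arguments above select a slice of the subject
# list, e.g. to run the first four subjects found in the bids data set:
# python highspeed-glm-main.py 0 4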
# ======================================================================
# DEFINE PBS CLUSTER JOB TEMPLATE (NEEDED WHEN RUNNING ON THE CLUSTER):
# ======================================================================
job_template = """
#PBS -l walltime=10:00:00
#PBS -j oe
#PBS -o /home/mpib/wittkuhn/highspeed/highspeed-glm/logs/glm
#PBS -m n
#PBS -v FSLOUTPUTTYPE=NIFTI_GZ
source /etc/bash_completion.d/virtualenvwrapper
workon highspeed-glm
module load fsl/5.0
module load matlab/R2017b
module load freesurfer/6.0.0
"""
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
# path_log = opj(path_root, 'logs', 'l1analyis')
# enable debug mode for logging and configuration:
# config.enable_debug_mode()
# enable logging to file and provide path to the logging file:
# config.update_config({'logging': {'log_directory': path_log,
#                                   'log_to_file': True},
#                       'execution': {'stop_on_first_crash': False,
#                                     'keep_unnecessary_outputs': 'false'},
#                       'monitoring': {'enabled': True}
#                       })
# update the global logging settings:
# logging.update_logging(config)
# callback_log_path = opj(path_log, 'ressources.log')
# logger = logging.getLogger('callback')
# logger.setLevel(logging.DEBUG)
# handler = logging.FileHandler(callback_log_path)
# logger.addHandler(handler)
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
# path_log = opj(path_root, 'logs', 'l1analyis')
# create directory to save the log files if it does not exist yet:
# if not os.path.exists(path_log):
#     os.makedirs(path_log)
# configure logging:
# logging.basicConfig(
#     filename=opj(path_log, 'log_l1analysis.log'),
#     level=logging.DEBUG,
#     filemode="a+",
#     format='%(asctime)s - %(levelname)s - %(message)s',
#     datefmt='%d/%m/%Y %H:%M:%S')
# logging.getLogger().addHandler(logging.StreamHandler())
# ======================================================================
# LOG INFORMATION ABOUT THE EXECUTION
# ======================================================================
# print the loaded SPM version:
# analysis_name = "Level 1 GLM analysis"
# logging.info("--------------------------------------------------------")
# logging.info("Analysis: " + analysis_name)
# logging.info("SPM version: " + (spm.SPMCommand().version))
# logging.info("List of subjects:")
# logging.info(sub_list)
# ======================================================================
# DEFINE SETTINGS
# ======================================================================
# time of repetition, in seconds:
time_repetition = 1.25
# total number of runs:
num_runs = 8
# smoothing kernel, in mm:
fwhm = 4
# number of dummy volumes to remove from each run:
num_dummy = 0
# ======================================================================
# DEFINE NODE: INFOSOURCE
# ======================================================================
# define the infosource node that collects the data:
infosource = Node(IdentityInterface(
    fields=['subject_id']), name='infosource')
# let the node iterate (parallelize) over all subjects:
infosource.iterables = [('subject_id', sub_list)]
# ======================================================================
# DEFINE SELECTFILES NODE
# ======================================================================
path_confounds = glob.glob(opj(
    path_root, 'fmriprep', 'fmriprep', '*', '*',
    'func', '*highspeed*confounds_regressors.tsv'))
path_events = glob.glob(opj(
    path_root, 'bids', '*', '*', 'func', '*events.tsv'))
path_func = glob.glob(opj(
    path_root, 'fmriprep', 'fmriprep', '*', '*',
    'func', '*highspeed*space-T1w*preproc_bold.nii.gz'))
path_anat = glob.glob(opj(
    path_root, 'fmriprep', 'fmriprep', '*',
    'anat', '*_desc-preproc_T1w.nii.gz'))
path_wholemask = glob.glob(opj(
    path_root, 'fmriprep', 'fmriprep', '*', '*',
    'func', '*highspeed*space-T1w*brain_mask.nii.gz'))
# retrieve the file contents from the datalad dataset:
dl.get(path_confounds)
dl.get(path_events)
dl.get(path_func)
dl.get(path_anat)
dl.get(path_wholemask)
# define all relevant file paths:
templates = dict(
    confounds=opj(path_root, 'fmriprep', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*confounds_regressors.tsv'),
    events=opj(path_root, 'bids', '{subject_id}', '*', 'func',
               '*events.tsv'),
    func=opj(path_root, 'fmriprep', 'fmriprep', '{subject_id}', '*',
             'func', '*highspeed*space-T1w*preproc_bold.nii.gz'),
    anat=opj(path_root, 'fmriprep', 'fmriprep', '{subject_id}',
             'anat', '{subject_id}_desc-preproc_T1w.nii.gz'),
    wholemask=opj(path_root, 'fmriprep', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*space-T1w*brain_mask.nii.gz'),
)
# define the selectfiles node:
selectfiles = Node(SelectFiles(templates, sort_filelist=True),
                   name='selectfiles')
# set expected thread and memory usage for the node:
selectfiles.interface.num_threads = 1
selectfiles.interface.mem_gb = 0.1
# selectfiles.inputs.subject_id = 'sub-20'
# selectfiles_results = selectfiles.run()
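# note (illustrative): SelectFiles fills the '{subject_id}' placeholder in
# the templates above with the value received from the infosource node and
# expands the glob patterns; e.g. for 'sub-01' the 'events' template matches
# files like bids/sub-01/*/func/*events.tsv (actual names depend on the data).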
# ======================================================================
# DEFINE CREATE_SUSAN_SMOOTH WORKFLOW NODE
# ======================================================================
# define the susan smoothing node and specify the smoothing fwhm:
susan = create_susan_smooth()
# set the smoothing kernel:
susan.inputs.inputnode.fwhm = fwhm
# set expected thread and memory usage for the nodes:
susan.get_node('inputnode').interface.num_threads = 1
susan.get_node('inputnode').interface.mem_gb = 0.1
susan.get_node('median').interface.num_threads = 1
susan.get_node('median').interface.mem_gb = 3
susan.get_node('mask').interface.num_threads = 1
susan.get_node('mask').interface.mem_gb = 3
susan.get_node('meanfunc2').interface.num_threads = 1
susan.get_node('meanfunc2').interface.mem_gb = 3
susan.get_node('merge').interface.num_threads = 1
susan.get_node('merge').interface.mem_gb = 3
susan.get_node('multi_inputs').interface.num_threads = 1
susan.get_node('multi_inputs').interface.mem_gb = 3
susan.get_node('smooth').interface.num_threads = 1
susan.get_node('smooth').interface.mem_gb = 3
susan.get_node('outputnode').interface.num_threads = 1
susan.get_node('outputnode').interface.mem_gb = 0.1
# ======================================================================
# DEFINE NODE: FUNCTION TO GET THE SUBJECT-SPECIFIC INFORMATION
# ======================================================================
subject_info = MapNode(Function(
    input_names=['events', 'confounds'],
    output_names=['subject_info', 'event_names'],
    function=get_subject_info),
    name='subject_info', iterfield=['events', 'confounds'])
# set expected thread and memory usage for the node:
subject_info.interface.num_threads = 1
subject_info.interface.mem_gb = 0.1
# subject_info.inputs.events = selectfiles_results.outputs.events
# subject_info.inputs.confounds = selectfiles_results.outputs.confounds
# subject_info_results = subject_info.run()
# subject_info_results.outputs.subject_info
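# note (illustrative): SpecifySPMModel expects each element of 'subject_info'
# to be a nipype Bunch; presumably get_subject_info builds one per run from
# the events and confounds files, along the lines of:
# from nipype.interfaces.base import Bunch
# Bunch(conditions=['stimulus'], onsets=[[10.0, 22.5]], durations=[[0.5]],
#       regressor_names=['trans_x'], regressors=[[0.01, 0.02]])
# (condition and regressor names here are made up for illustration only).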
# ======================================================================
# DEFINE NODE: REMOVE DUMMY VOLUMES (USING FSL ROI)
# ======================================================================
# function: extract region of interest (ROI) from an image
trim = MapNode(ExtractROI(), name='trim', iterfield=['in_file'])
# define index of the first selected volume (i.e., minimum index):
trim.inputs.t_min = num_dummy
# define the number of volumes selected starting at the minimum index
# (-1 selects all remaining volumes):
trim.inputs.t_size = -1
# define the fsl output type:
trim.inputs.output_type = 'NIFTI'
# set expected thread and memory usage for the node:
trim.interface.num_threads = 1
trim.interface.mem_gb = 3
# ======================================================================
# DEFINE NODE: LEAVE-ONE-RUN-OUT SELECTION OF DATA
# ======================================================================
leave_one_run_out = Node(Function(
    input_names=['subject_info', 'event_names', 'data_func', 'run'],
    output_names=['subject_info', 'data_func', 'contrasts'],
    function=leave_one_out),
    name='leave_one_run_out')
# define the run index as an iterable (one iteration per held-out run):
leave_one_run_out.iterables = ('run', range(num_runs))
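# note (illustrative): because 'run' is an iterable, nipype expands everything
# downstream of this node once per value, i.e. with num_runs = 8 the level 1
# model is set up eight times, each time presumably with the corresponding run
# left out by leave_one_out (see highspeed_glm_functions).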
# ======================================================================
# DEFINE NODE: SPECIFY SPM MODEL (GENERATE SPM-SPECIFIC MODEL)
# ======================================================================
# function: makes a model specification compatible with spm designers
# adds SPM specific options to SpecifyModel
l1model = Node(SpecifySPMModel(), name="l1model")
# input: concatenate runs to a single session (boolean, default: False):
l1model.inputs.concatenate_runs = False
# input: units of event onsets and durations (secs or scans):
l1model.inputs.input_units = 'secs'
# input: units of design event onsets and durations (secs or scans):
l1model.inputs.output_units = 'secs'
# input: time of repetition (a float):
l1model.inputs.time_repetition = time_repetition
# high-pass filter cutoff in secs (a float, default = 128 secs):
l1model.inputs.high_pass_filter_cutoff = 128
# ======================================================================
# DEFINE NODE: LEVEL 1 DESIGN (GENERATE AN SPM DESIGN MATRIX)
# ======================================================================
# function: generate an SPM design matrix
l1design = Node(Level1Design(), name="l1design")
# input: (a dictionary with keys which are 'hrf' or 'fourier' or
# 'fourier_han' or 'gamma' or 'fir' and with values which are any value)
l1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
# input: units for specification of onsets ('secs' or 'scans'):
l1design.inputs.timing_units = 'secs'
# input: interscan interval / repetition time in secs (a float):
l1design.inputs.interscan_interval = time_repetition
# input: model serial correlations AR(1), FAST or none:
l1design.inputs.model_serial_correlations = 'AR(1)'
# input: number of time-bins per scan (an integer):
l1design.inputs.microtime_resolution = 16
# input: the onset/time-bin in seconds for alignment (a float):
l1design.inputs.microtime_onset = 1
# set expected thread and memory usage for the node:
l1design.interface.num_threads = 1
l1design.interface.mem_gb = 2
# ======================================================================
# DEFINE NODE: ESTIMATE MODEL (ESTIMATE THE PARAMETERS OF THE MODEL)
# ======================================================================
# function: use spm_spm to estimate the parameters of a model
l1estimate = Node(EstimateModel(), name="l1estimate")
# input: (a dictionary with keys which are 'Classical' or 'Bayesian2'
# or 'Bayesian' and with values which are any value)
l1estimate.inputs.estimation_method = {'Classical': 1}
# set expected thread and memory usage for the node:
l1estimate.interface.num_threads = 1
l1estimate.interface.mem_gb = 2
# ======================================================================
# DEFINE NODE: ESTIMATE CONTRASTS (ESTIMATES THE CONTRASTS)
# ======================================================================
# function: use spm_contrasts to estimate contrasts of interest
l1contrasts = Node(EstimateContrast(), name="l1contrasts")
# input: list of contrasts with each contrast being a tuple of the form
# ('name', 'stat', [condition list], [weight list], [session list]);
# the contrasts are supplied by the leave_one_run_out node (see below):
# l1contrasts.inputs.contrasts = l1contrasts_list
# node input: overwrite previous results:
l1contrasts.overwrite = True
# set expected thread and memory usage for the node:
l1contrasts.interface.num_threads = 1
l1contrasts.interface.mem_gb = 1.5
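# example (illustrative only; the actual contrasts are built by leave_one_out
# in highspeed_glm_functions): a single t-contrast against baseline could be
# specified as
# l1contrasts_list = [('stimulus > baseline', 'T', ['stimulus'], [1])]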
# ======================================================================
# DEFINE NODE: FUNCTION TO PLOT CONTRASTS
# ======================================================================
plot_contrasts = MapNode(Function(
    input_names=['anat', 'stat_map', 'thresh'],
    output_names=['out_path'],
    function=plot_stat_maps),
    name='plot_contrasts', iterfield=['thresh'])
# input: plot data with a set of different thresholds:
plot_contrasts.inputs.thresh = [None, 1, 2, 3]
# set expected thread and memory usage for the node:
plot_contrasts.interface.num_threads = 1
plot_contrasts.interface.mem_gb = 0.2
# ======================================================================
# DEFINE NODE: THRESHOLD
# ======================================================================
# function: topological FDR thresholding based on cluster extent/size.
# smoothness is estimated from GLM residuals but is assumed to be the
# same for all of the voxels.
thresh = Node(Threshold(), name="thresh")
# input: whether to use FWE (Bonferroni) correction for the initial threshold
# (a boolean, nipype default value: True):
thresh.inputs.use_fwe_correction = False
# input: whether to use FDR over cluster extent probabilities (boolean)
thresh.inputs.use_topo_fdr = True
# input: value for initial thresholding (defining clusters):
thresh.inputs.height_threshold = 0.05
# input: is the cluster forming threshold a stat value or p-value?
# ('p-value' or 'stat', nipype default value: p-value):
thresh.inputs.height_threshold_type = 'p-value'
# input: which contrast in the SPM.mat to use (an integer):
thresh.inputs.contrast_index = 1
# input: p threshold on FDR corrected cluster size probabilities (float):
thresh.inputs.extent_fdr_p_threshold = 0.05
# input: minimum cluster size in voxels (an integer, default = 0):
thresh.inputs.extent_threshold = 0
# set expected thread and memory usage for the node:
thresh.interface.num_threads = 1
thresh.interface.mem_gb = 0.2
# ======================================================================
# DEFINE NODE: THRESHOLD STATISTICS
# ======================================================================
# function: given height and cluster size threshold, calculate
# theoretical probabilities concerning false positives
thresh_stat = Node(ThresholdStatistics(), name="thresh_stat")
# input: which contrast in the SPM.mat to use (an integer):
thresh_stat.inputs.contrast_index = 1
# ======================================================================
# CREATE DATASINK NODE (OUTPUT STREAM):
# ======================================================================
# create a node of the function:
l1datasink = Node(DataSink(), name='datasink')
# assign the path to the base directory:
l1datasink.inputs.base_directory = opj(path_root, 'l1pipeline')
# create a list of substitutions to adjust the file paths of datasink:
substitutions = [('_subject_id_', '')]
# assign the substitutions to the datasink command:
l1datasink.inputs.substitutions = substitutions
# determine whether to store output in parameterized form:
l1datasink.inputs.parameterization = True
# set expected thread and memory usage for the node:
l1datasink.interface.num_threads = 1
l1datasink.interface.mem_gb = 0.2
# ======================================================================
# DEFINE THE LEVEL 1 ANALYSIS SUB-WORKFLOW AND CONNECT THE NODES:
# ======================================================================
# initiation of the 1st-level analysis sub-workflow:
l1analysis = Workflow(name='l1analysis')
# connect the 1st-level analysis components:
l1analysis.connect(l1model, 'session_info', l1design, 'session_info')
l1analysis.connect(l1design, 'spm_mat_file', l1estimate, 'spm_mat_file')
l1analysis.connect(l1estimate, 'spm_mat_file', l1contrasts, 'spm_mat_file')
l1analysis.connect(l1estimate, 'beta_images', l1contrasts, 'beta_images')
l1analysis.connect(l1estimate, 'residual_image', l1contrasts, 'residual_image')
# ======================================================================
# DEFINE META-WORKFLOW PIPELINE:
# ======================================================================
# initiation of the overall level 1 meta-workflow:
l1pipeline = Workflow(name='l1pipeline')
# stop execution of the workflow if an error is encountered:
l1pipeline.config = {'execution': {'stop_on_first_crash': True,
                                   'hash_method': 'timestamp'}}
# define the base directory of the workflow:
l1pipeline.base_dir = opj(path_root, 'work')
# ======================================================================
# ENABLE LOGGING:
# ======================================================================
# enable logging to file:
# config.enable_debug_mode()
# config.update_config({'logging': {'log_directory': os.getcwd(),
#                                   'log_to_file': True}})
# logging.update_logging(config)
# ======================================================================
# CONNECT WORKFLOW NODES:
# ======================================================================
# connect infosource to selectfiles node:
l1pipeline.connect(infosource, 'subject_id', selectfiles, 'subject_id')
# pass events and confounds to subject_info to generate the
# subject-specific events and regressors:
l1pipeline.connect(selectfiles, 'events', subject_info, 'events')
l1pipeline.connect(selectfiles, 'confounds', subject_info, 'confounds')
# connect functional files to smoothing workflow:
l1pipeline.connect(selectfiles, 'func', susan, 'inputnode.in_files')
l1pipeline.connect(selectfiles, 'wholemask', susan, 'inputnode.mask_file')
l1pipeline.connect(susan, 'outputnode.smoothed_files', l1datasink, 'smooth')
# connect smoothed functional data to the trimming node:
l1pipeline.connect(susan, 'outputnode.smoothed_files', trim, 'in_file')
# ======================================================================
# INPUT AND OUTPUT STREAM FOR THE LEVEL 1 SPM ANALYSIS SUB-WORKFLOW:
# ======================================================================
# connect regressors to the subsetting node:
l1pipeline.connect(subject_info, 'subject_info', leave_one_run_out, 'subject_info')
# connect event_names to the subsetting node:
l1pipeline.connect(subject_info, 'event_names', leave_one_run_out, 'event_names')
# connect smoothed and trimmed data to the subsetting node:
l1pipeline.connect(trim, 'roi_file', leave_one_run_out, 'data_func')
# connect regressors to the level 1 model specification node:
l1pipeline.connect(leave_one_run_out, 'subject_info', l1analysis, 'l1model.subject_info')
# connect smoothed and trimmed data to the level 1 model specification:
l1pipeline.connect(leave_one_run_out, 'data_func', l1analysis, 'l1model.functional_runs')
# connect the l1 contrast specification to contrast estimation:
l1pipeline.connect(leave_one_run_out, 'contrasts', l1analysis, 'l1contrasts.contrasts')
# connect the anatomical image to the plotting node:
l1pipeline.connect(selectfiles, 'anat', plot_contrasts, 'anat')
# connect spm t-images to the plotting node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', plot_contrasts, 'stat_map')
# connect the t-images and spm mat file to the threshold node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', thresh, 'stat_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', thresh, 'spm_mat_file')
# connect all output results of the level 1 analysis to the datasink:
l1pipeline.connect(l1analysis, 'l1estimate.beta_images', l1datasink, 'estimates.@beta_images')
l1pipeline.connect(l1analysis, 'l1estimate.residual_image', l1datasink, 'estimates.@residual_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', l1datasink, 'contrasts.@spm_mat')
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', l1datasink, 'contrasts.@spmT')
l1pipeline.connect(l1analysis, 'l1contrasts.con_images', l1datasink, 'contrasts.@con')
l1pipeline.connect(plot_contrasts, 'out_path', l1datasink, 'contrasts.@out_path')
l1pipeline.connect(thresh, 'thresholded_map', l1datasink, 'thresh.@threshhold_map')
l1pipeline.connect(thresh, 'pre_topo_fdr_map', l1datasink, 'thresh.@pre_topo_fdr_map')
# ======================================================================
# WRITE GRAPH AND EXECUTE THE WORKFLOW
# ======================================================================
# write the graph:
l1pipeline.write_graph(graph2use='colored', simple_form=True)
# set the maximum resources the workflow can utilize:
# args_dict = {'status_callback': log_nodes_cb}
# execute the workflow depending on the operating system:
if 'darwin' in sys.platform:
    # will execute the workflow using all available cpus:
    l1pipeline.run(plugin='MultiProc')
elif 'linux' in sys.platform:
    l1pipeline.run(plugin='PBS', plugin_args=dict(template=job_template))
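# note (illustrative, not part of the original pipeline): on a local machine
# the number of parallel processes could be capped instead of using all
# available cpus, e.g.:
# l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})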