#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ======================================================================
# SCRIPT INFORMATION:
# ======================================================================
# SCRIPT: FIRST LEVEL GLM
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ACKNOWLEDGEMENTS: THANKS TO HRVOJE STOJIC (UCL) FOR HELP
# ======================================================================
# IMPORT RELEVANT PACKAGES
# ======================================================================
# import basic libraries:
import os
import sys
import warnings
from os.path import join as opj
# import nipype libraries:
from nipype.interfaces.utility import Function, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.utils.profiler import log_nodes_cb
from nipype import config, logging
# import spm and matlab interfaces:
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.spm.model import (
    Level1Design, EstimateModel, EstimateContrast, ThresholdStatistics,
    Threshold)
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces import spm
# import fsl interfaces:
from nipype.workflows.fmri.fsl import create_susan_smooth
from nipype.interfaces.fsl.utils import ExtractROI
# import libraries for bids interaction:
from bids.layout import BIDSLayout
# import freesurfer interfaces:
# import custom functions:
from highspeed_glm_functions import (
    get_subject_info, plot_stat_maps, leave_one_out)
# ======================================================================
# ENVIRONMENT SETTINGS (DEALING WITH ERRORS AND WARNINGS):
# ======================================================================
# set the fsl output type environment variable:
os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
# deal with nipype-related warnings:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# inhibit CTF lock
os.environ['MCR_INHIBIT_CTF_LOCK'] = '1'
# filter out warnings related to the numpy package:
warnings.filterwarnings("ignore", message="numpy.dtype size changed*")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed*")
# ======================================================================
# SET PATHS AND SUBJECTS
# ======================================================================
# define paths depending on the operating system (OS) platform:
project = 'highspeed'
# initialize empty paths:
path_root = None
sub_list = None
if 'darwin' in sys.platform:
    path_root = opj('/Users', 'wittkuhn', 'Volumes', 'tardis_beegfs', project)
    path_spm = '/Users/Shared/spm12'
    path_matlab = '/Applications/MATLAB_R2017a.app/bin/matlab -nodesktop -nosplash'
    # set paths for spm:
    spm.SPMCommand.set_mlab_paths(paths=path_spm, matlab_cmd=path_matlab)
    MatlabCommand.set_default_paths(path_spm)
    MatlabCommand.set_default_matlab_cmd(path_matlab)
    sub_list = ['sub-01']
elif 'linux' in sys.platform:
    path_root = opj('/home', 'mpib', 'wittkuhn', project, 'highspeed-glm')
    # path_matlab = '/home/mpib/wittkuhn/spm12.simg eval \$SPMMCRCMD'
    # path_matlab = opj('/home', 'beegfs', 'wittkuhn', 'tools', 'spm', 'spm12.simg eval \$SPMMCRCMD')
    singularity_cmd = 'singularity run -B /home/mpib/wittkuhn -B /mnt/beegfs/home/wittkuhn /home/mpib/wittkuhn/highspeed/highspeed-glm/tools/spm/spm12.simg'
    singularity_spm = 'eval \$SPMMCRCMD'
    path_matlab = ' '.join([singularity_cmd, singularity_spm])
    spm.SPMCommand.set_mlab_paths(matlab_cmd=path_matlab, use_mcr=True)
    # grab the list of subjects from the bids data set:
    path_bids = opj(path_root, 'bids')
    layout = BIDSLayout(path_bids)
    # get all subject ids:
    sub_list = sorted(layout.get_subjects())
    # create a template to add the "sub-" prefix to the ids:
    sub_template = ['sub-'] * len(sub_list)
    # add the prefix to all ids:
    sub_list = ["%s%s" % t for t in zip(sub_template, sub_list)]
    # select specific subjects if a range was passed on the command line:
    sub_list = sub_list[int(sys.argv[1]):int(sys.argv[2])]
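    # note: this slicing assumes the script is called with two command-line
    # arguments that select a subset of subjects, e.g. (hypothetical call)
    # `python highspeed-glm-main.py 0 1` to run only the first subject of
    # the sorted BIDS subject list.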
# print the SPM version:
print('using SPM version %s' % spm.SPMCommand().version)
# ======================================================================
# DEFINE PBS CLUSTER JOB TEMPLATE (NEEDED WHEN RUNNING ON THE CLUSTER):
# ======================================================================
job_template = """
#PBS -l walltime=10:00:00
#PBS -j oe
#PBS -o /home/mpib/wittkuhn/highspeed/highspeed-glm/logs/glm
#PBS -m n
#PBS -v FSLOUTPUTTYPE=NIFTI_GZ
source /etc/bash_completion.d/virtualenvwrapper
workon highspeed-glm
module load fsl/5.0
module load matlab/R2017b
module load freesurfer/6.0.0
"""
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
# path_log = opj(path_root, 'logs', 'l1analyis')
# enable debug mode for logging and configuration:
# config.enable_debug_mode()
# enable logging to file and provide path to the logging file:
# config.update_config({'logging': {'log_directory': path_log,
#                                   'log_to_file': True},
#                       'execution': {'stop_on_first_crash': False,
#                                     'keep_unnecessary_outputs': 'false'},
#                       'monitoring': {'enabled': True}
#                       })
# update the global logging settings:
# logging.update_logging(config)
# callback_log_path = opj(path_log, 'ressources.log')
# logger = logging.getLogger('callback')
# logger.setLevel(logging.DEBUG)
# handler = logging.FileHandler(callback_log_path)
# logger.addHandler(handler)
# ======================================================================
# SETTING UP LOGGING
# ======================================================================
# path_log = opj(path_root, 'logs', 'l1analyis')
## create directory to save the log files if it does not exist yet:
# if not os.path.exists(path_log):
#     os.makedirs(path_log)
## configure logging:
# logging.basicConfig(
#     filename=opj(path_log, 'log_l1analysis.log'),
#     level=logging.DEBUG,
#     filemode="a+",
#     format='%(asctime)s - %(levelname)s - %(message)s',
#     datefmt='%d/%m/%Y %H:%M:%S')
# logging.getLogger().addHandler(logging.StreamHandler())
# ======================================================================
# LOG INFORMATION ABOUT THE EXECUTION
# ======================================================================
# print the loaded SPM version:
# analysis_name = "Level 1 GLM analysis"
# logging.info("--------------------------------------------------------")
# logging.info("Analysis: " + analysis_name)
# logging.info("SPM version: " + (spm.SPMCommand().version))
# logging.info("List of subjects:")
# logging.info(sub_list)
# ======================================================================
# DEFINE SETTINGS
# ======================================================================
# time of repetition, in seconds:
time_repetition = 1.25
# total number of runs:
num_runs = 8
# smoothing kernel, in mm:
fwhm = 4
# number of dummy volumes to remove from each run:
num_dummy = 0
# ======================================================================
# DEFINE NODE: INFOSOURCE
# ======================================================================
# define the infosource node that collects the data:
infosource = Node(IdentityInterface(
    fields=['subject_id']), name='infosource')
# let the node iterate (parallelize) over all subjects:
infosource.iterables = [('subject_id', sub_list)]
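# note: setting iterables makes nipype expand the downstream graph into one
# independent branch per subject id, so subjects can be processed in parallel.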
# ======================================================================
# DEFINE SELECTFILES NODE
# ======================================================================
# define all relevant files paths:
templates = dict(
    confounds=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*confounds_regressors.tsv'),
    events=opj(path_root, 'bids', '{subject_id}', '*', 'func',
               '*events.tsv'),
    func=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}', '*',
             'func', '*highspeed*space-T1w*preproc_bold.nii.gz'),
    anat=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
             'anat', '{subject_id}_desc-preproc_T1w.nii.gz'),
    wholemask=opj(path_root, 'derivatives', 'fmriprep', '{subject_id}',
                  '*', 'func', '*highspeed*space-T1w*brain_mask.nii.gz'),
)
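# note: SelectFiles fills in the '{subject_id}' placeholder and expands the
# '*' wildcards via glob, so 'confounds', 'events', 'func' and 'wholemask'
# each resolve to a list of files (one per session/run); this is why the
# downstream nodes that consume them (subject_info, trim) are MapNodes.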
# define the selectfiles node:
selectfiles = Node(SelectFiles(templates, sort_filelist=True),
                   name='selectfiles')
# set expected thread and memory usage for the node:
selectfiles.interface.num_threads = 1
selectfiles.interface.mem_gb = 0.1
# selectfiles.inputs.subject_id = 'sub-20'
# selectfiles_results = selectfiles.run()
# ======================================================================
# DEFINE CREATE_SUSAN_SMOOTH WORKFLOW NODE
# ======================================================================
# define the susan smoothing node and specify the smoothing fwhm:
susan = create_susan_smooth()
# set the smoothing kernel:
susan.inputs.inputnode.fwhm = fwhm
# set expected thread and memory usage for the nodes:
susan.get_node('inputnode').interface.num_threads = 1
susan.get_node('inputnode').interface.mem_gb = 0.1
susan.get_node('median').interface.num_threads = 1
susan.get_node('median').interface.mem_gb = 3
susan.get_node('mask').interface.num_threads = 1
susan.get_node('mask').interface.mem_gb = 3
susan.get_node('meanfunc2').interface.num_threads = 1
susan.get_node('meanfunc2').interface.mem_gb = 3
susan.get_node('merge').interface.num_threads = 1
susan.get_node('merge').interface.mem_gb = 3
susan.get_node('multi_inputs').interface.num_threads = 1
susan.get_node('multi_inputs').interface.mem_gb = 3
susan.get_node('smooth').interface.num_threads = 1
susan.get_node('smooth').interface.mem_gb = 3
susan.get_node('outputnode').interface.num_threads = 1
susan.get_node('outputnode').interface.mem_gb = 0.1
# ======================================================================
# DEFINE NODE: FUNCTION TO GET THE SUBJECT-SPECIFIC INFORMATION
# ======================================================================
subject_info = MapNode(Function(
    input_names=['events', 'confounds'],
    output_names=['subject_info', 'event_names'],
    function=get_subject_info),
    name='subject_info', iterfield=['events', 'confounds'])
# set expected thread and memory usage for the node:
subject_info.interface.num_threads = 1
subject_info.interface.mem_gb = 0.1
# subject_info.inputs.events = selectfiles_results.outputs.events
# subject_info.inputs.confounds = selectfiles_results.outputs.confounds
# subject_info_results = subject_info.run()
# subject_info_results.outputs.subject_info
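# for reference, SpecifySPMModel expects each element of 'subject_info' to be
# a nipype Bunch holding conditions, onsets and durations (and, optionally,
# nuisance regressors); a minimal, purely illustrative sketch with made-up
# condition names (the real values are produced by get_subject_info):
# from nipype.interfaces.base import Bunch
# example_info = Bunch(conditions=['cond_a', 'cond_b'],
#                      onsets=[[10.0, 35.5], [60.25]],
#                      durations=[[0.5, 0.5], [0.5]],
#                      regressor_names=['trans_x'],
#                      regressors=[[0.01, -0.02, 0.03]])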
# ======================================================================
# DEFINE NODE: REMOVE DUMMY VOLUMES (USING FSL ROI)
# ======================================================================
# function: extract region of interest (ROI) from an image
trim = MapNode(ExtractROI(), name='trim', iterfield=['in_file'])
# define index of the first selected volume (i.e., minimum index):
trim.inputs.t_min = num_dummy
# define the number of volumes selected starting at the minimum index:
trim.inputs.t_size = -1
# define the fsl output type:
trim.inputs.output_type = 'NIFTI'
# set expected thread and memory usage for the node:
trim.interface.num_threads = 1
trim.interface.mem_gb = 3
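# ExtractROI wraps FSL's fslroi; with t_min=num_dummy and t_size=-1 it keeps
# all volumes from index num_dummy onwards (equivalent to
# `fslroi <in_file> <roi_file> <num_dummy> -1`), so with num_dummy = 0 the
# data pass through unchanged.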
# ======================================================================
# DEFINE NODE: LEAVE-ONE-RUN-OUT SELECTION OF DATA
# ======================================================================
leave_one_run_out = Node(Function(
    input_names=['subject_info', 'event_names', 'data_func', 'run'],
    output_names=['subject_info', 'data_func', 'contrasts'],
    function=leave_one_out),
    name='leave_one_run_out')
# let the node iterate over the index of the left-out run:
leave_one_run_out.iterables = ('run', range(num_runs))
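# note: iterating over 'run' creates one branch of the downstream GLM per
# held-out run, i.e. each first-level model is specified on a run-wise subset
# of the data (presumably so that the left-out run can later serve as
# independent data, e.g. for cross-validated decoding analyses).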
# ======================================================================
# DEFINE NODE: SPECIFY SPM MODEL (GENERATE SPM-SPECIFIC MODEL)
# ======================================================================
# function: makes a model specification compatible with spm designers
# adds SPM specific options to SpecifyModel
l1model = Node(SpecifySPMModel(), name="l1model")
# input: concatenate runs to a single session (boolean, default: False):
l1model.inputs.concatenate_runs = False
# input: units of event onsets and durations (secs or scans):
l1model.inputs.input_units = 'secs'
# input: units of design event onsets and durations (secs or scans):
l1model.inputs.output_units = 'secs'
# input: time of repetition (a float):
l1model.inputs.time_repetition = time_repetition
# high-pass filter cutoff in secs (a float, default = 128 secs):
l1model.inputs.high_pass_filter_cutoff = 128
# ======================================================================
# DEFINE NODE: LEVEL 1 DESIGN (GENERATE AN SPM DESIGN MATRIX)
# ======================================================================
# function: generate an SPM design matrix
l1design = Node(Level1Design(), name="l1design")
# input: (a dictionary with keys which are 'hrf' or 'fourier' or
# 'fourier_han' or 'gamma' or 'fir' and with values which are any value)
l1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
# input: units for specification of onsets ('secs' or 'scans'):
l1design.inputs.timing_units = 'secs'
# input: interscan interval / repetition time in secs (a float):
l1design.inputs.interscan_interval = time_repetition
# input: model serial correlations AR(1), FAST or none:
l1design.inputs.model_serial_correlations = 'AR(1)'
# input: number of time-bins per scan (an integer):
l1design.inputs.microtime_resolution = 16
# input: the onset/time-bin for alignment (a float):
l1design.inputs.microtime_onset = 1
# set expected thread and memory usage for the node:
l1design.interface.num_threads = 1
l1design.interface.mem_gb = 2
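# note: microtime_resolution and microtime_onset correspond to SPM's
# microtime settings (fMRI_T and fMRI_T0), i.e. each scan is divided into 16
# time bins and the design is sampled at the first of those bins.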
# ======================================================================
# DEFINE NODE: ESTIMATE MODEL (ESTIMATE THE PARAMETERS OF THE MODEL)
# ======================================================================
# function: use spm_spm to estimate the parameters of a model
l1estimate = Node(EstimateModel(), name="l1estimate")
# input: (a dictionary with keys which are 'Classical' or 'Bayesian2'
# or 'Bayesian' and with values which are any value)
l1estimate.inputs.estimation_method = {'Classical': 1}
# set expected thread and memory usage for the node:
l1estimate.interface.num_threads = 1
l1estimate.interface.mem_gb = 2
# ======================================================================
# DEFINE NODE: ESTIMATE CONTRASTS (ESTIMATES THE CONTRASTS)
# ======================================================================
# function: use spm_contrasts to estimate contrasts of interest
l1contrasts = Node(EstimateContrast(), name="l1contrasts")
# input: list of contrasts with each contrast being a list of the form:
# [('name', 'stat', [condition list], [weight list], [session list])]:
# l1contrasts.inputs.contrasts = l1contrasts_list
# node input: overwrite previous results:
l1contrasts.overwrite = True
# set expected thread and memory usage for the node:
l1contrasts.interface.num_threads = 1
l1contrasts.interface.mem_gb = 1.5
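# for reference, the contrasts handed to this node by leave_one_out follow
# the EstimateContrast tuple format; a purely illustrative (hypothetical)
# t-contrast with made-up condition names would look like this:
# example_contrasts = [('cond_a > cond_b', 'T', ['cond_a', 'cond_b'], [1, -1])]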
# ======================================================================
# DEFINE NODE: FUNCTION TO PLOT CONTRASTS
# ======================================================================
plot_contrasts = MapNode(Function(
    input_names=['anat', 'stat_map', 'thresh'],
    output_names=['out_path'],
    function=plot_stat_maps),
    name='plot_contrasts', iterfield=['thresh'])
# input: plot data with set of different thresholds:
plot_contrasts.inputs.thresh = [None, 1, 2, 3]
# set expected thread and memory usage for the node:
plot_contrasts.interface.num_threads = 1
plot_contrasts.interface.mem_gb = 0.2
# ======================================================================
# DEFINE NODE: THRESHOLD
# ======================================================================
# function: Topological FDR thresholding based on cluster extent/size.
# Smoothness is estimated from GLM residuals but is assumed to be the
# same for all of the voxels.
thresh = Node(Threshold(), name="thresh")
# input: whether to use FWE (Bonferroni) correction for initial threshold
# (a boolean, nipype default value: True):
thresh.inputs.use_fwe_correction = False
# input: whether to use FDR over cluster extent probabilities (boolean)
thresh.inputs.use_topo_fdr = True
# input: value for initial thresholding (defining clusters):
thresh.inputs.height_threshold = 0.05
# input: is the cluster forming threshold a stat value or p-value?
# ('p-value' or 'stat', nipype default value: p-value):
thresh.inputs.height_threshold_type = 'p-value'
# input: which contrast in the SPM.mat to use (an integer):
thresh.inputs.contrast_index = 1
# input: p threshold on FDR corrected cluster size probabilities (float):
thresh.inputs.extent_fdr_p_threshold = 0.05
# input: minimum cluster size in voxels (an integer, default = 0):
thresh.inputs.extent_threshold = 0
# set expected thread and memory usage for the node:
thresh.interface.num_threads = 1
thresh.interface.mem_gb = 0.2
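# in combination, these settings form clusters at an uncorrected voxel-wise
# threshold of p < 0.05 and then keep only clusters whose extent survives
# topological FDR correction at p < 0.05.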
# ======================================================================
# DEFINE NODE: THRESHOLD STATISTICS
# ======================================================================
# function: Given height and cluster size threshold calculate
# theoretical probabilities concerning false positives
thresh_stat = Node(ThresholdStatistics(), name="thresh_stat")
# input: which contrast in the SPM.mat to use (an integer):
thresh_stat.inputs.contrast_index = 1
# ======================================================================
# CREATE DATASINK NODE (OUTPUT STREAM):
# ======================================================================
# create a node of the function:
l1datasink = Node(DataSink(), name='datasink')
# assign the path to the base directory:
l1datasink.inputs.base_directory = opj(path_root, 'l1pipeline')
# create a list of substitutions to adjust the file paths of datasink:
substitutions = [('_subject_id_', '')]
# assign the substitutions to the datasink command:
l1datasink.inputs.substitutions = substitutions
# determine whether to store output in parameterized form:
l1datasink.inputs.parameterization = True
# set expected thread and memory usage for the node:
l1datasink.interface.num_threads = 1
l1datasink.interface.mem_gb = 0.2
# ======================================================================
# DEFINE THE LEVEL 1 ANALYSIS SUB-WORKFLOW AND CONNECT THE NODES:
# ======================================================================
# initiation of the 1st-level analysis workflow:
l1analysis = Workflow(name='l1analysis')
# connect the 1st-level analysis components:
l1analysis.connect(l1model, 'session_info', l1design, 'session_info')
l1analysis.connect(l1design, 'spm_mat_file', l1estimate, 'spm_mat_file')
l1analysis.connect(l1estimate, 'spm_mat_file', l1contrasts, 'spm_mat_file')
l1analysis.connect(l1estimate, 'beta_images', l1contrasts, 'beta_images')
l1analysis.connect(l1estimate, 'residual_image', l1contrasts, 'residual_image')
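# note: because l1analysis is a nested workflow, the meta-workflow below
# addresses its inner nodes with dotted names such as 'l1model.subject_info'
# or 'l1contrasts.spmT_images'.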
# ======================================================================
# DEFINE META-WORKFLOW PIPELINE:
# ======================================================================
# initiation of the meta-workflow:
l1pipeline = Workflow(name='l1pipeline')
# stop execution of the workflow if an error is encountered:
l1pipeline.config = {'execution': {'stop_on_first_crash': True,
                                   'hash_method': 'timestamp'}}
# define the base directory of the workflow:
l1pipeline.base_dir = opj(path_root, 'work')
# ======================================================================
# ENABLE LOGGING:
# ======================================================================
# enable logging to file:
# config.enable_debug_mode()
# config.update_config({'logging': {'log_directory': os.getcwd(),
#                                   'log_to_file': True}})
# logging.update_logging(config)
# ======================================================================
# CONNECT WORKFLOW NODES:
# ======================================================================
# connect infosource to selectfiles node:
l1pipeline.connect(infosource, 'subject_id', selectfiles, 'subject_id')
# generate subject specific events and regressors for subject_info:
l1pipeline.connect(selectfiles, 'events', subject_info, 'events')
l1pipeline.connect(selectfiles, 'confounds', subject_info, 'confounds')
# connect functional files to smoothing workflow:
l1pipeline.connect(selectfiles, 'func', susan, 'inputnode.in_files')
l1pipeline.connect(selectfiles, 'wholemask', susan, 'inputnode.mask_file')
l1pipeline.connect(susan, 'outputnode.smoothed_files', l1datasink, 'smooth')
# connect smoothed functional data to the trimming node:
l1pipeline.connect(susan, 'outputnode.smoothed_files', trim, 'in_file')
# ======================================================================
# INPUT AND OUTPUT STREAM FOR THE LEVEL 1 SPM ANALYSIS SUB-WORKFLOW:
# ======================================================================
# connect regressors to the subsetting node:
l1pipeline.connect(subject_info, 'subject_info', leave_one_run_out, 'subject_info')
# connect event_names to the subsetting node:
l1pipeline.connect(subject_info, 'event_names', leave_one_run_out, 'event_names')
# connect smoothed and trimmed data to subsetting node:
l1pipeline.connect(trim, 'roi_file', leave_one_run_out, 'data_func')
# connect regressors to the level 1 model specification node:
l1pipeline.connect(leave_one_run_out, 'subject_info', l1analysis, 'l1model.subject_info')
# connect smoothed and trimmed data to the level 1 model specification:
l1pipeline.connect(leave_one_run_out, 'data_func', l1analysis, 'l1model.functional_runs')
# connect l1 contrast specification to contrast estimation:
l1pipeline.connect(leave_one_run_out, 'contrasts', l1analysis, 'l1contrasts.contrasts')
# connect the anatomical image to the plotting node:
l1pipeline.connect(selectfiles, 'anat', plot_contrasts, 'anat')
# connect spm t-images to the plotting node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', plot_contrasts, 'stat_map')
# connect the t-images and spm mat file to the threshold node:
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', thresh, 'stat_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', thresh, 'spm_mat_file')
# connect all output results of the level 1 analysis to the datasink:
l1pipeline.connect(l1analysis, 'l1estimate.beta_images', l1datasink, 'estimates.@beta_images')
l1pipeline.connect(l1analysis, 'l1estimate.residual_image', l1datasink, 'estimates.@residual_image')
l1pipeline.connect(l1analysis, 'l1contrasts.spm_mat_file', l1datasink, 'contrasts.@spm_mat')
l1pipeline.connect(l1analysis, 'l1contrasts.spmT_images', l1datasink, 'contrasts.@spmT')
l1pipeline.connect(l1analysis, 'l1contrasts.con_images', l1datasink, 'contrasts.@con')
l1pipeline.connect(plot_contrasts, 'out_path', l1datasink, 'contrasts.@out_path')
l1pipeline.connect(thresh, 'thresholded_map', l1datasink, 'thresh.@threshhold_map')
l1pipeline.connect(thresh, 'pre_topo_fdr_map', l1datasink, 'thresh.@pre_topo_fdr_map')
# ======================================================================
# WRITE GRAPH AND EXECUTE THE WORKFLOW
# ======================================================================
# write the graph:
l1pipeline.write_graph(graph2use='colored', simple_form=True)
# set the maximum resources the workflow can utilize:
# args_dict = {'status_callback' : log_nodes_cb}
# execute the workflow depending on the operating system:
if 'darwin' in sys.platform:
    # will execute the workflow using all available cpus:
    l1pipeline.run(plugin='MultiProc')
elif 'linux' in sys.platform:
    l1pipeline.run(plugin='PBS', plugin_args=dict(template=job_template))
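# note: the subject range passed on the command line (see the slicing of
# sub_list above) determines which subjects are processed; on macOS the
# workflow runs locally via the MultiProc plugin, while on Linux each node is
# submitted as a PBS job using the job template defined above.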