highspeed-mriqc-subject-level.sh

#!/usr/bin/bash
# ==============================================================================
# SCRIPT INFORMATION:
# ==============================================================================
# SCRIPT: CREATE PARTICIPANT-LEVEL MRIQC REPORTS FOR A BIDS-STRUCTURED DATASET
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT (MPIB)
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ACKNOWLEDGEMENTS: THANKS TO ALEXANDER SKOWRON AND NIR MONETA @ MPIB FOR HELP
# ==============================================================================
# DEFINE ALL PATHS:
# ==============================================================================
# path to the base directory:
PATH_BASE="${HOME}"
# path to the project root directory:
PATH_ROOT="${PATH_BASE}/highspeed"
# define the name of the project:
PROJECT_NAME="highspeed-mriqc"
# define the path to the project folder:
PATH_PROJECT="${PATH_ROOT}/${PROJECT_NAME}"
# define the name of the current task:
TASK_NAME="mriqc"
# define the path to the script main directory:
PATH_CODE="${PATH_PROJECT}/code/${TASK_NAME}"
# cd into the directory of the current task:
cd "${PATH_CODE}"
# define the path to the singularity container:
PATH_CONTAINER="${PATH_PROJECT}/tools/${TASK_NAME}/${TASK_NAME}_0.15.2rc1.sif"
# define the path for the templateflow cache:
PATH_TEMPLATEFLOW="${PATH_BASE}/.cache/templateflow"
# path to the data directory (in bids format):
PATH_INPUT="${PATH_PROJECT}/bids"
# path to the output directory:
PATH_OUTPUT="${PATH_PROJECT}/${TASK_NAME}"
# path to the working directory:
PATH_WORK="${PATH_PROJECT}/work"
# path to the log directory:
PATH_LOG="${PATH_PROJECT}/logs/$(date '+%Y%m%d_%H%M%S')"
# path to the text file with all subject ids:
PATH_SUB_LIST="${PATH_CODE}/highspeed-participant-list.txt"
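# note: the list is split on newlines below, i.e. one subject id per line is
# assumed; the participant labels passed to mriqc are derived from a zero-padded
# counter over this list, not from the ids themselves. a hypothetical example:
#   sub-01
#   sub-02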
# ==============================================================================
# CREATE RELEVANT DIRECTORIES:
# ==============================================================================
# create the output directory (it is bind-mounted into the container below):
if [ ! -d "${PATH_OUTPUT}" ]
then
    mkdir -p "${PATH_OUTPUT}"
fi
# create the working directory:
if [ ! -d "${PATH_WORK}" ]
then
    mkdir -p "${PATH_WORK}"
fi
# create the directory for log files:
if [ ! -d "${PATH_LOG}" ]
then
    mkdir -p "${PATH_LOG}"
else
    # remove old log files inside the log directory:
    rm -rf "${PATH_LOG}"/*
fi
# ==============================================================================
# DEFINE PARAMETERS:
# ==============================================================================
# maximum number of cpus per process:
N_CPUS=5
# memory demand in *GB*:
MEM_GB=10
# read the subject ids from the text file:
SUB_LIST=$(tr '\n' ' ' < "${PATH_SUB_LIST}")
# declare an array with the sessions you want to run:
declare -a SESSIONS=("01" "02")
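# note: these labels correspond to the BIDS ses-<label> entities of the input
# dataset and are passed to mriqc via --session-id in the command below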
# ==============================================================================
# RUN MRIQC:
# ==============================================================================
# initialize a subject counter:
SUB_COUNT=0
for SUB in ${SUB_LIST}; do
    # update the subject counter:
    SUB_COUNT=$((SUB_COUNT + 1))
    # create the subject number with zero-padding:
    SUB_PAD=$(printf "%02d" ${SUB_COUNT})
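    # (e.g., the third subject in the list yields SUB_PAD="03")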
    # loop over all sessions:
    for SES in "${SESSIONS[@]}"; do
        # create a new job file:
        echo "#!/bin/bash" > job
        # name of the job:
        echo "#SBATCH --job-name mriqc_sub-${SUB_PAD}_ses-${SES}" >> job
        # add the partition to the job:
        echo "#SBATCH --partition gpu" >> job
        # set the expected maximum running time for the job:
        echo "#SBATCH --time 24:00:00" >> job
        # determine how much RAM your operation needs:
        echo "#SBATCH --mem ${MEM_GB}GB" >> job
        # disable email notifications (use e.g. ALL to be notified on end/failure):
        echo "#SBATCH --mail-type NONE" >> job
        # write the log to the log folder:
        echo "#SBATCH --output ${PATH_LOG}/slurm-mriqc-%j.out" >> job
        # request multiple cpus:
        echo "#SBATCH --cpus-per-task ${N_CPUS}" >> job
        # export the templateflow environment variable into the container:
        echo "export SINGULARITYENV_TEMPLATEFLOW_HOME=/templateflow" >> job
        # define the main command:
        echo "singularity run --contain -B ${PATH_INPUT}:/input:ro \
        -B ${PATH_OUTPUT}:/output:rw -B ${PATH_WORK}:/work:rw \
        -B ${PATH_TEMPLATEFLOW}:/templateflow:rw \
        ${PATH_CONTAINER} /input/ /output/ participant --participant-label ${SUB_PAD} \
        --session-id ${SES} -w /work/ --verbose-reports --write-graph \
        --n_cpus ${N_CPUS} --mem_gb ${MEM_GB} --no-sub" >> job
        # submit the job to the cluster queue and remove the job file afterwards
        # to avoid confusion:
        sbatch job
        rm -f job
    done
done
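# ==============================================================================
# USAGE NOTE (a minimal sketch, assuming a SLURM cluster with singularity):
# ==============================================================================
# running this script on a login node submits one job per subject and session:
#   bash highspeed-mriqc-subject-level.sh
# the submitted jobs can be monitored with standard SLURM tools, and the log
# files appear in the timestamped log directory defined above, e.g.:
#   squeue -u "$USER"
#   ls ${PATH_LOG}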