highspeed-defacing-cluster.sh

#!/bin/bash
# ==============================================================================
# SCRIPT INFORMATION:
# ==============================================================================
# SCRIPT: DEFACING ANATOMICAL MRI DATA IN A BIDS DATASET
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ==============================================================================
# DEFINE ALL PATHS:
# ==============================================================================
# define home directory:
PATH_BASE="${HOME}"
# define the name of the current task:
TASK_NAME="pydeface"
# define the name of the project:
PROJECT_NAME="highspeed"
# path to the singularity container:
PATH_CONTAINER="${PATH_BASE}/tools/${TASK_NAME}/${TASK_NAME}_37-2e0c2d.sif"
# path to the log directory:
PATH_LOG="${PATH_BASE}/${PROJECT_NAME}/logs/${TASK_NAME}"
# path to the data directory (in BIDS format):
PATH_BIDS="${PATH_BASE}/${PROJECT_NAME}/bids"
# ==============================================================================
# CREATE RELEVANT DIRECTORIES:
# ==============================================================================
# create directory for log files:
if [ ! -d "${PATH_LOG}" ]; then
    mkdir -p "${PATH_LOG}"
else
    # remove old log files inside the log directory:
    rm -rf "${PATH_LOG}"/*
fi
# ==============================================================================
# DEFINE PARAMETERS:
# ==============================================================================
# maximum number of cpus per process:
N_CPUS=1
# memory demand in *MB*:
MEM_MB=500
# memory demand in *KB*:
MEM_KB="$((${MEM_MB} * 1000))"
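# note: the "#PBS -l mem=" directive below is specified in kilobytes, which is
# why the megabyte value is converted above.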
# ==============================================================================
# RUN PYDEFACE:
# ==============================================================================
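# loop over all T1-weighted anatomical images in the BIDS directory; the glob
# below assumes one subject level and one session level (e.g. sub-*/ses-*/anat/):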
for FILE in ${PATH_BIDS}/*/*/anat/*T1w.nii.gz; do
    # get just the filename from the full path:
    FILE_BASENAME="$(basename -- "${FILE}")"
    # get the parent directory:
    FILE_PARENT="$(dirname "${FILE}")"
    # name of the job (this first line starts a fresh job file):
    echo "#PBS -N pydeface_${FILE_BASENAME}" > job
    # set the expected maximum running time for the job:
    echo "#PBS -l walltime=1:00:00" >> job
    # determine how much RAM your operation needs:
    echo "#PBS -l mem=${MEM_KB}kb" >> job
    # email notification on abort/end, use 'n' for no notification:
    echo "#PBS -m n" >> job
    # write (output) log to log folder:
    echo "#PBS -o ${PATH_LOG}" >> job
    # write (error) log to log folder:
    echo "#PBS -e ${PATH_LOG}" >> job
    # request multiple cpus:
    echo "#PBS -l nodes=1:ppn=${N_CPUS}" >> job
    # define the main command:
    echo "singularity run -B ${FILE_PARENT}:/input:rw ${PATH_CONTAINER} pydeface /input/${FILE_BASENAME} --force" >> job
    # submit the job to the cluster queue and remove the job file to avoid confusion:
    qsub job
    rm -f job
done
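# ==============================================================================
# USAGE (example):
# ==============================================================================
# a minimal sketch, assuming the script is run from a login node of a
# PBS/Torque cluster with singularity available on the compute nodes:
#   bash highspeed-defacing-cluster.sh
#   qstat -u "${USER}"   # check the status of the submitted pydeface jobs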