#!/bin/bash
# ==============================================================================
# SCRIPT INFORMATION:
# ==============================================================================
# SCRIPT: DEFACING ANATOMICAL MRI DATA IN A BIDS DATASET
# PROJECT: HIGHSPEED
# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
# MAX PLANCK RESEARCH GROUP NEUROCODE
# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
# LENTZEALLEE 94, 14195 BERLIN, GERMANY
# ==============================================================================
# DEFINE ALL PATHS:
# ==============================================================================
# define home directory:
PATH_BASE="${HOME}"
# define the name of the current task:
TASK_NAME="pydeface"
# define the path to the project root directory:
PATH_ROOT="${PATH_BASE}/highspeed"
# define the name of the project:
PROJECT_NAME="highspeed-bids"
# define the path to the project folder:
PATH_PROJECT="${PATH_ROOT}/${PROJECT_NAME}"
# define the path to the singularity container:
PATH_CONTAINER="${PATH_PROJECT}/tools/${TASK_NAME}/${TASK_NAME}_37-2e0c2d.sif"
# path to the log directory:
PATH_LOG="${PATH_PROJECT}/logs/${TASK_NAME}"
# path to the data directory (in BIDS format; here the project root itself):
PATH_BIDS="${PATH_PROJECT}"
  32. # ==============================================================================
  33. # CREATE RELEVANT DIRECTORIES:
  34. # ==============================================================================
  35. # create directory for log files:
  36. if [ ! -d ${PATH_LOG} ]; then
  37. mkdir -p ${PATH_LOG}
  38. else
  39. # remove old log files inside the log container:
  40. rm -r ${PATH_LOG}/*
  41. fi
  42. # ==============================================================================
  43. # DEFINE PARAMETERS:
  44. # ==============================================================================
  45. # maximum number of cpus per process:
  46. N_CPUS=1
  47. # memory demand in *MB*
  48. MEM_MB=500
  49. # memory demand in *GB*
  50. MEM_GB="$((${MEM_MB} / 1000))"
  51. # memory demand in *KB*
  52. MEM_KB="$((${MEM_MB} * 1000))"
  53. # ==============================================================================
  54. # RUN PYDEFACE:
  55. # ==============================================================================
  56. for FILE in ${PATH_BIDS}/*/*/anat/*T1w.nii.gz; do
  57. # to just get filename from a given path:
  58. FILE_BASENAME="$(basename -- $FILE)"
  59. # get the parent directory:
  60. FILE_PARENT="$(dirname "$FILE")"
  61. # create cluster job:
  62. echo "#!/bin/bash" > job
  63. # name of the job
  64. echo "#SBATCH --job-name pydeface_${FILE_BASENAME}" >> job
  65. # set the expected maximum running time for the job:
  66. echo "#SBATCH --time 1:00:00" >> job
  67. # determine how much RAM your operation needs:
  68. echo "#SBATCH --mem ${MEM_GB}GB" >> job
  69. # email notification on abort/end, use 'n' for no notification:
  70. echo "#SBATCH --mail-type NONE" >> job
  71. # writelog to log folder
  72. echo "#SBATCH --output ${PATH_LOG}/slurm-%j.out" >> job
  73. # request multiple cpus
  74. echo "#SBATCH --cpus-per-task ${N_CPUS}" >> job
  75. # define the main command:
  76. echo "singularity run -B ${FILE_PARENT}:/input:rw ${PATH_CONTAINER} \
  77. pydeface /input/${FILE_BASENAME} --force" >> job
  78. # submit job to cluster queue and remove it to avoid confusion:
  79. sbatch job
  80. rm -f job
  81. done