process.sub

#!/bin/bash
subject=$(basename "$1")
subd="$1"
executable=$(pwd)/code/participant_job
# the job expects these environment variables for labeling and synchronization
# - JOBID: subject AND process specific ID to make a branch name from
#   (must be unique across all (even multiple) submissions);
#   including the cluster ID will enable sorting multiple computing attempts
# - DSLOCKFILE: lock (must be accessible from all compute jobs) to synchronize
#   write access to the output dataset
# - DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__...:
#   (additional) locations for datalad to locate relevant subdatasets, in case
#   a configured URL is outdated
# - GIT_AUTHOR_...: identity information used to save dataset changes in
#   compute jobs
export JOBID=${subject} \
       DSLOCKFILE=$(pwd)/.condor_datalad_lock \
       GIT_AUTHOR_NAME='Felix Hoffstaedter' \
       GIT_AUTHOR_EMAIL='f.hoffstaedter@fz-juelich.de'
# essential args for "participant_job"
# 1: where to clone the analysis dataset
# 2: location to push the result git branch to. The "ria+" prefix is stripped.
# 3: ID of the subject to process
arguments="ria+file:///data/project/cat_preprocessed/inputstore#6c5791d8-1803-48a1-bbaa-2b5e23b5f707 \
    /data/project/cat_preprocessed/dataladstore/6c5/791d8-1803-48a1-bbaa-2b5e23b5f707 \
    ${subd} \
    "
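# work in a job-specific scratch directory under /tmp; the first four
# characters of the subject ID (presumably a leading "sub-") are dropped
# from the directory name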
mkdir -p /tmp/tmp_${subject:4}
cd /tmp/tmp_${subject:4}
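# run participant_job for this subject, capturing stdout and stderr in
# per-subject log files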
${executable} ${arguments} \
    > /data/project/cat_preprocessed/TMP/CAT12.8/CORR_cat12.8.1/logs/${subject}.out \
    2> /data/project/cat_preprocessed/TMP/CAT12.8/CORR_cat12.8.1/logs/${subject}.err
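# restore write permissions (cloned dataset content is write-protected) and
# remove the temporary workspace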
chmod +w -R /tmp/tmp_${subject:4} && rm -rf /tmp/tmp_${subject:4}
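# Usage sketch (an assumption, not part of the original script): the wrapper
# takes a single argument, the path of the subject (sub)dataset to process,
# and could be called once per subject, for example:
#
#   for subd in inputs/data/sub-*; do    # hypothetical input layout
#       bash process.sub "${subd}"
#   done
#
# On the cluster, each such call would typically be dispatched as one compute job.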