#!/bin/bash
# Wrapper that runs the datalad "participant_job" for a single subject on a
# compute node, capturing stdout/stderr into per-subject log files and
# working inside a subject-specific scratch directory under /tmp.
#
# Usage: <script> <path-to-subject-directory>
#   $1 - subject directory; its basename is used as the subject ID.
set -eu

subject=$(basename "$1")
subd="$1"
executable="$(pwd)/code/participant_job"

# the job expects these environment variables for labeling and synchronization
# - JOBID: subject AND process specific ID to make a branch name from
#   (must be unique across all (even multiple) submissions)
#   including the cluster ID will enable sorting multiple computing attempts
# - DSLOCKFILE: lock (must be accessible from all compute jobs) to synchronize
#   write access to the output dataset
# - DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__...:
#   (additional) locations for datalad to locate relevant subdatasets, in case
#   a configured URL is outdated
# - GIT_AUTHOR_...: Identity information used to save dataset changes in compute
#   jobs
export JOBID="${subject}" \
       DSLOCKFILE="$(pwd)/.condor_datalad_lock" \
       GIT_AUTHOR_NAME='Felix Hoffstaedter' \
       GIT_AUTHOR_EMAIL='f.hoffstaedter@fz-juelich.de'

# essential args for "participant_job"
# 1: where to clone the analysis dataset
# 2: location to push the result git branch to. The "ria+" prefix is stripped.
# 3: ID of the subject to process
# Use an array (not a whitespace-joined string) so each argument survives
# quoting intact.
arguments=(
  "ria+file:///data/project/cat_preprocessed/inputstore#6c5791d8-1803-48a1-bbaa-2b5e23b5f707"
  "/data/project/cat_preprocessed/dataladstore/6c5/791d8-1803-48a1-bbaa-2b5e23b5f707"
  "${subd}"
)

logdir=/data/project/cat_preprocessed/TMP/CAT12.8/CORR_cat12.8.1/logs
# ${subject:4} strips the leading 4 chars (presumably the "sub-" prefix) —
# TODO confirm subject IDs always carry that prefix.
workdir="/tmp/tmp_${subject:4}"

mkdir -p "${workdir}"

# Always clean up the scratch directory, even if the job fails under set -e;
# datalad/git-annex may leave read-only files, hence the chmod first.
cleanup() { chmod -R +w "${workdir}" && rm -rf "${workdir}"; }
trap cleanup EXIT

cd "${workdir}"
"${executable}" "${arguments[@]}" \
  > "${logdir}/${subject}.out" \
  2> "${logdir}/${subject}.err"