#!/bin/bash

# activate the virtual environment for DataLad 0.16+
source ~/datalad_venv/datalad-dev/bin/activate

# the job assumes that it is a good idea to run everything in PWD;
# the job manager should make sure that is true

# fail whenever something is fishy, use -x to get verbose logfiles
set -e -u -x

dssource="$1"
pushgitremote="$2"
subid="$3"

# get the analysis dataset, which includes the inputs as well.
# Importantly, we do not clone from the location that we want to push the
# results to, in order to avoid too many jobs blocking access to
# the same location and creating a throughput bottleneck
datalad clone "${dssource}" ds

# all following actions are performed in the context of the superdataset
cd ds

# in order to avoid accumulation of temporary git-annex availability
# information, and to avoid a synchronization bottleneck from having to
# consolidate the git-annex branch across jobs, we only push the main
# tracking branch back to the output store (plus the actual file content).
# Final availability information can be established via an eventual
# "git-annex fsck -f ReproVBM_out-storage".
# This remote is never fetched; it accumulates a large number of branches,
# and we want to avoid progressive slowdown. Instead we only ever push
# a unique branch per job (subject AND process specific name)
git remote add outputstore "$pushgitremote"

# all results of this job will be put into a dedicated branch
git checkout -b "job-${JOBID}"

# we pull down the input subject manually in order to discover relevant
# files. We do this outside the recorded call, because on a potential
# re-run we want to be able to do fine-grained recomputation of individual
# outputs. The recorded calls will have specific paths that will enable
# recomputation outside the scope of the original Condor setup
datalad get -n "inputs/ds003455"

# the meat of the matter:
# look for T1w files in the input data for the given participant.
# It is critical for reproducibility that the command given to
# "containers-run" does not rely on any property of the immediate
# computational environment (env vars, services, etc.)
find \
    "inputs/ds003455/${subid}" \
    -name '*T1w.nii.gz' \
    -exec sh -c '
        odir=$(echo {} | cut -d / -f3);
        datalad -c datalad.annex.retry=12 containers-run \
            -m "Compute $odir" \
            -n cat12-8 \
            --explicit \
            -o $odir \
            -i {} \
            -i code/finalize_job_outputs_ENIGMA.sh \
            sh -e -u -x -c "
                rm -rf {outputs[0]} ; mkdir -p {outputs[0]} \
                && cp {inputs[0]} {outputs[0]} \
                && /singularity -b code/cat_standalone_segment_enigma.m {outputs[0]}/*.nii.gz \
                && rm {outputs[0]}/*.nii* \
                && gzip {outputs[0]}/*/*.nii \
            " \
    ' \;

# remove big files from the results after hashing, before pushing to the
# RIA store
datalad drop --what filecontent --reckless kill \
    ${subid}/mri/iy* ${subid}/mri/y* ${subid}/mri/anon_m* ${subid}/mri/wj* \
    ${subid}/*/*.pdf ${subid}/surf/*sphere* ${subid}/surf/*pial* \
    ${subid}/surf/*white*

# it may be that the above command did not yield any outputs and no commit
# was made (no T1s found for the given participant). We nevertheless push
# the branch to have a record that this was attempted and did not fail.

# file content first -- does not need a lock, no interaction with Git
datalad push --to ReproVBM_out-storage
# and the output branch -- this needs a lock, because the Git repository
# in the output store is a shared resource across concurrent jobs
flock --verbose "$DSLOCKFILE" git push outputstore

echo SUCCESS
# job handler should clean up workspace
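
# ----------------------------------------------------------------------
# Not part of the per-subject job: a minimal sketch of the eventual
# consolidation step referenced in the comments above. After all jobs
# have completed, the job-specific branches need to be merged and the
# git-annex availability information re-established with a single fsck
# against the output store. The RIA URL and dataset ID below are
# placeholders, "merged" is a hypothetical clone directory, and "--fast"
# (skip content checksumming) is an assumption; only the remote name
# "ReproVBM_out-storage" and the "job-" branch prefix are taken from
# this script.
#
#   datalad clone 'ria+file:///path/to/outputstore#<dataset-id>' merged
#   cd merged
#   git merge -m "Merge results of all jobs" \
#       $(git branch -al | grep 'job-' | tr -d ' ')
#   git annex fsck --fast -f ReproVBM_out-storage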