
SLURM submission setup

Adina Wagner, 3 years ago
parent commit 3b2010da3e
5 changed files with 72 additions and 0 deletions:
  1. .gitignore (+1 -0)
  2. code/all.jobs (+22 -0)
  3. code/call.job (+19 -0)
  4. code/process.sbatch (+19 -0)
  5. code/runJOB.sh (+11 -0)

+ 1 - 0
.gitignore

@@ -1,3 +1,4 @@
 logs
 dag_tmp
 .condor_datalad_lock
+.SLURM_datalad_lock

+ 22 - 0
code/all.jobs

@@ -0,0 +1,22 @@
+#!/bin/bash
+code/call.job sub-05
+code/call.job sub-16
+code/call.job sub-11
+code/call.job sub-08
+code/call.job sub-02
+code/call.job sub-06
+code/call.job sub-15
+code/call.job sub-20
+code/call.job sub-18
+code/call.job sub-12
+code/call.job sub-01
+code/call.job sub-03
+code/call.job sub-09
+code/call.job sub-10
+code/call.job sub-17
+code/call.job sub-04
+code/call.job sub-21
+code/call.job sub-13
+code/call.job sub-19
+code/call.job sub-14
+code/call.job sub-07

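A list like this does not have to be maintained by hand; it could be regenerated from the subject folders present in the input dataset. A minimal sketch, assuming the subjects live under inputs/data/sub-* (a hypothetical path, adjust to the actual layout):

    #!/bin/bash
    # rebuild code/all.jobs from the sub-* folders in the (assumed) input location
    echo '#!/bin/bash' > code/all.jobs
    for sub in inputs/data/sub-*/; do
        echo "code/call.job $(basename "$sub")" >> code/all.jobs
    done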
+ 19 - 0
code/call.job

@@ -0,0 +1,19 @@
+#!/bin/bash -x
+#
+# per-subject input: the subject ID is passed as the single argument
+
+subid=$1
+
+# define DSLOCKFILE, DATALAD & GIT ENV for participant_job
+export DSLOCKFILE=/data/group/psyinf/ukb_workflow_template/forrest/.SLURM_datalad_lock DATALAD_GET_SUBDATASET__SOURCE__CANDIDATE__101cat=https://github.com/ReproNim/containers.git#{id} GIT_AUTHOR_NAME=$(git config user.name) GIT_AUTHOR_EMAIL=$(git config user.email) JOBID=${subid:4}.${SLURM_JOB_ID} 
+# use a subject-specific folder in shared memory
+mkdir /dev/shm/${JOBID}
+cd /dev/shm/${JOBID}
+
+# run things
+/data/group/psyinf/ukb_workflow_template/forrest/code/participant_job ria+file:///data/group/psyinf/inputstore#47a9ef9e-9fa0-49cd-873f-c51cc501da96 /data/group/psyinf/outputstore/47a/9ef9e-9fa0-49cd-873f-c51cc501da96 ${subid} >/data/group/psyinf/ukb_workflow_template/forrest/logs/${JOBID}.out 2>/data/group/psyinf/ukb_workflow_template/forrest/logs/${JOBID}.err
+
+cd /dev/shm/
+chmod -R 777 /dev/shm/${JOBID}
+rm -rf /dev/shm/${JOBID}
+

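DSLOCKFILE itself is never touched in this script; it is exported for participant_job (which is not part of this commit) and presumably serializes concurrent result pushes to the output store. A sketch of that consuming side, assuming flock(1) is available and the output remote is named outputstore (both assumptions):

    # inside participant_job (assumption): hold the lock while pushing results,
    # so parallel jobs cannot push to the output store at the same time
    flock --verbose "$DSLOCKFILE" git push outputstore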
+ 19 - 0
code/process.sbatch

@@ -0,0 +1,19 @@
+#!/bin/bash -x
+### If you need a compute-time project for job submission, set it here
+#SBATCH --account=FIXME
+#SBATCH --mail-user=FIXME
+#SBATCH --mail-type=END
+#SBATCH --job-name=FIXME
+#SBATCH --output=logs/processing-out.%j
+#SBATCH --error=logs/processing-err.%j
+### If there is a time limit for job runs, set the (max) value here
+#SBATCH --time=24:00:00
+#SBATCH --ntasks-per-node=1
+### If specific partitions are available, e.g. with more RAM, define one here
+#SBATCH --partition=FIXME
+#SBATCH --nodes=1
+
+### Define the number of jobs that are run simultaneously
+srun parallel --delay 0.2 -a code/all.jobs --FIXME
+
+wait

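The --FIXME placeholder above stands for the GNU parallel option that caps how many lines of all.jobs run concurrently on the node. It is deliberately left unfilled in the commit; one hypothetical completion (the value 8 is an arbitrary example, match it to the node's core count):

    # run at most 8 lines of all.jobs at a time; parallel executes each line as a command
    srun parallel --delay 0.2 --jobs 8 -a code/all.jobs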
+ 11 - 0
code/runJOB.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# split the all.jobs file according to the node distribution;
+# numbered files [1 -> splits] are created in the PWD and deleted afterwards
+JOBFILE=code/all.jobs
+splits=FIXME
+
+parallel -j${splits} --block -1 -a $JOBFILE --header : --pipepart 'cat > {#}'
+# submitting independent SLURM jobs for efficiency and robustness
+parallel 'sbatch code/catpart.sbatch {}' ::: $(seq ${splits})
+
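A hypothetical invocation, assuming FIXME has been replaced with e.g. splits=3 to distribute the work over three nodes:

    bash code/runJOB.sh
    # -> writes part files 1, 2, 3 into the PWD and submits one sbatch job per part
    squeue -u "$USER"   # verify that the three jobs are queued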