#!/bin/bash
#
#SBATCH --job-name=calving
#SBATCH --ntasks=1
#SBATCH --exclusive
#SBATCH --mail-user=fabien.maussion@uibk.ac.at
#SBATCH --mail-type=ALL

# Abort whenever a single step fails. Without this, bash will just continue on errors.
set -e

# Current RGI region
RGI_REG=$(printf "%02d" "$SLURM_ARRAY_TASK_ID")
export RGI_REG

# On every node, when Slurm starts a job, it will make sure the directory
# /work/username exists and is writable by the job's user.
# We create a sub-directory there for this job to store its runtime data in.
WORKDIR="/work/$SLURM_JOB_USER/$SLURM_JOB_ID/rgi_reg_$RGI_REG"
mkdir -p "$WORKDIR"
echo "RGI Region: $RGI_REG"
echo "Workdir for this run: $WORKDIR"

# Export WORKDIR as an environment variable so our script can use it to find its working directory.
export WORKDIR

# Use the local data download cache
export OGGM_DOWNLOAD_CACHE=/home/data/download
export OGGM_DOWNLOAD_CACHE_RO=1
export OGGM_EXTRACT_DIR="/work/$SLURM_JOB_USER/$SLURM_JOB_ID/oggm_tmp"

# All commands in the EOF block run inside of the container.
# Adjust the container version to your needs; tagged images are guaranteed
# not to change after their respective date has passed.
srun -n 1 -c "${SLURM_JOB_CPUS_PER_NODE}" singularity exec docker://oggm/oggm:20200331 bash -s <<EOF
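  # -------------------------------------------------------------------
  # NOTE: the original script was truncated here ("bash -s <"), so the
  # heredoc body below is a minimal sketch of what typically runs inside
  # the container, NOT the original commands. The script name
  # "run_rgi_region.py" is a hypothetical placeholder for your own OGGM
  # run script.
  # -------------------------------------------------------------------
  set -e
  # Use a fake home directory inside the workdir so the container does
  # not clutter the shared home directory ($WORKDIR is expanded by the
  # outer shell; \$HOME is expanded inside the container).
  export HOME="$WORKDIR/fake_home"
  mkdir -p "\$HOME"
  # Run the actual task (hypothetical script name, see note above).
  python3 ./run_rgi_region.py
EOF

# Usage: because the script reads SLURM_ARRAY_TASK_ID, submit it as an
# array job with one task per first-order RGI region (there are 19), e.g.:
#   sbatch --array=1-19 this_script.slurm
# (the filename here is illustrative; use whatever you saved this file as).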