#!/bin/bash
#
# SLURM batch script: run the glacierMIP "fill missing glaciers" postprocessing
# inside an OGGM singularity container, with a per-job scratch workdir and the
# shared download cache wired up via environment variables.
#
# NOTE(review): the original file had been collapsed onto a single physical
# line, which put every command inside the shebang's comment — the script as
# stored executed nothing. Reconstructed one statement per line below.
#
#SBATCH --job-name=glacierMIP_filling
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=64
#SBATCH --time=24:00:00
#SBATCH --mail-user=lilian.schuster@uibk.ac.at
#SBATCH --mail-type=ALL
#SBATCH --qos=high

# Abort whenever a single step fails. Without this, bash will just continue on errors.
set -e

# On every node, when slurm starts a job, it will make sure the directory
# /work/username exists and is writable by the jobs user.
# We create a sub-directory there for this job to store its runtime data at.
WORKDIR="/work/$SLURM_JOB_USER/$SLURM_JOB_ID/my_run"
# Fallback for runs outside SLURM (no job user set): use a throwaway temp dir.
[[ -z "$SLURM_JOB_USER" ]] && WORKDIR="$(mktemp -d)"
mkdir -p "$WORKDIR"
echo "Workdir for this run: $WORKDIR"
#scp
# Remove the runtime directory on every exit path (success or failure).
# $WORKDIR is expanded now, at trap-definition time — it no longer changes below.
trap "rm -rf '$WORKDIR'" EXIT

## Link www fmaussion data here to avoid useless downloads
# preprocessed directory in www
mkdir -p "$WORKDIR/cache/cluster.klima.uni-bremen.de"
ln -s /home/www/fmaussion "$WORKDIR/cache/cluster.klima.uni-bremen.de/~fmaussion"
ln -s /home/www/lschuster "$WORKDIR/cache/cluster.klima.uni-bremen.de/~lschuster"

# Export the WORKDIR as environment variable so our script can use it to find
# its working directory.
export WORKDIR

# this JOBID can be used in the python script
echo "$SLURM_ARRAY_TASK_ID"
JOBID=$SLURM_ARRAY_TASK_ID
export JOBID

# Use the local data download cache (read-only), extract into the scratch dir.
export OGGM_DOWNLOAD_CACHE=/home/data/download
export OGGM_DOWNLOAD_CACHE_RO=1
export OGGM_EXTRACT_DIR="${WORKDIR}/oggm_tmp"

# 20220308 --> libgomp1 perror
# start via sbatch postprocessing_fill_missing_glaciers_rounce.slurm '06'
#
# NOTE(review): SOURCE is truncated mid-command here — the redirection target
# for `bash -s <` (presumably a here-doc `<<EOF ... EOF` body, or a script
# file) is missing from this chunk. The fragment is kept commented out so the
# file stays syntactically valid; restore the missing here-doc/script before
# submitting this job.
# TODO(review): reinstate the following once the redirection body is recovered:
# srun -n 1 -c "${SLURM_JOB_CPUS_PER_NODE}" singularity exec \
#   /home/users/lschuster/images/oggm_436be57b187609add6892d49ed76a0fc04a49307.sif \
#   bash -s <