#!/bin/bash
#
#SBATCH --job-name=glathida
#SBATCH --ntasks=1
#SBATCH --exclusive
#SBATCH --time=24:00:00
#SBATCH --mail-user=fabien.maussion@uibk.ac.at
#SBATCH --mail-type=ALL

# Abort whenever a single step fails. Without this, bash would just continue on errors.
set -e

# On every node, when Slurm starts a job, it makes sure the directory
# /work/username exists and is writable by the job's user.
# We create a sub-directory there for this job to store its runtime data in.
WORKDIR="/work/$SLURM_JOB_USER/$SLURM_JOB_ID/my_run"
mkdir -p "$WORKDIR"
echo "Workdir for this run: $WORKDIR"

# Export WORKDIR as an environment variable so that the notebook run below
# can use it to find its working directory.
export WORKDIR

# Use the local, read-only data download cache instead of fetching input data
# from the internet, and keep extracted files on the node's work disk.
export OGGM_DOWNLOAD_CACHE=/home/data/download
export OGGM_DOWNLOAD_CACHE_RO=1
export OGGM_EXTRACT_DIR="/work/$SLURM_JOB_USER/$SLURM_JOB_ID/oggm_tmp"

# Load the OGGM binary dependencies and activate the Python virtual environment.
module load oggm-binary-deps/4 python/3.8.5
source ~/.py3/bin/activate

# Execute the notebook with papermill, passing the GlaThiDa directory as a
# parameter. Note that no output notebook path is given here, so the executed
# notebook itself is not saved; append an output path (e.g. an *_out.ipynb
# file) after the input path if you want to keep it.
echo "Start papermill"
papermill glathida_to_rgi_glacier_attribution.ipynb -p gtd_dir GlaThiDa_2016

# Print a final message so you can actually see it being done in the output log.
echo "SLURM DONE"
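
# -----------------------------------------------------------------------------
# Usage sketch (the script file name below is an assumption; use whatever
# name you saved this file under):
#
#   sbatch run_glathida.slurm
#
# Inside the notebook, the exported working directory can then be picked up
# from the environment, e.g. in a Python cell:
#
#   import os
#   workdir = os.environ['WORKDIR']
# -----------------------------------------------------------------------------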