{ "cells": [ { "cell_type": "markdown", "id": "ce57fc1a-b077-4a28-899b-67ffdf381190", "metadata": {}, "source": [ "# Analysis of initial states (area & volume) for different " ] }, { "cell_type": "code", "execution_count": 1, "id": "5094efa3-038c-4c92-977d-ec405cfa43e0", "metadata": {}, "outputs": [], "source": [ "import xarray as xr\n", "import numpy as np\n", "import pandas as pd\n", "import os\n", "import glob\n", "import seaborn as sns\n", "import matplotlib.pyplot as plt" ] }, { "cell_type": "code", "execution_count": 2, "id": "484e30bf-1c11-470b-a2a6-4830f11e6a8a", "metadata": {}, "outputs": [], "source": [ "run = False\n", "# rename the data ... ok I do that later ...\n", "if run:\n", " for rgi_reg in arange(1,20,1):\n", " if rgi_reg < 10:\n", " rgi_reg = '0'+str(rgi_reg)\n", " else:\n", " rgi_reg = str(rgi_reg)\n", "\n", " for tip in ['regional']:\n", " path = f'/home/www/lschuster/glacierMIP3_analysis/{model_author}/' #{tip}/{rgi_reg}/'\n", " # get all files with wrong name \n", " if tip == 'regional':\n", " files = glob.glob(f'{path}{model_author}_rgi{rgi_reg}_sum_ssp*_'+'psl_cm6a-lr')\n", " elif tip == 'per_glacier':\n", " files = glob.glob(f'{path}{model_author}_rgi{rgi_reg}_glaciers_ssp*')\n", " # first part of the filename where the end is the part that we want to remove (i.e. _ssp126, _ssp370, or _ssp585)\n", " # Dave put _ssp126 /_ssp585 ... once too often into the file , we only want to have it at the end of the file!!!\n", " if tip == 'regional':\n", " false_path_first = f'{path}{model_author}_rgi{rgi_reg}_sum_ssp126' # _ssp126, _ssp370, or _ssp585 always have the same amount of \"letter\" that have to be removed ...\n", " elif tip == 'per_glacier':\n", " false_path_first = f'{path}{model_author}_rgi{rgi_reg}_glaciers_ssp126' \n", " # amount of letters that correspond to the first part of the filename path that is replaced by a new filename path\n", " len_p_first =len(false_path_first)\n", " # remove the wrong letters\n", " correct_path_first = false_path_first[:-len('_ssp126')] \n", " for f in files:\n", " correct_filename = correct_path_first + f[len_p_first:]\n", " #print(f, correct_filename)\n", " os.rename(f, correct_filename)" ] }, { "cell_type": "code", "execution_count": 3, "id": "53c0744e-3b41-42e9-b8c5-1aece2827a6b", "metadata": {}, "outputs": [], "source": [ "model_authors = ['Huss','Rounce','Compagno','Kraaijenbrink','GLIMB'] ##'Huss',\n", "\n", "gcms = ['gfdl-esm4', 'ipsl-cm6a-lr', 'mpi-esm1-2-hr', 'mri-esm2-0', 'ukesm1-0-ll' ]\n", "scenarios = ['hist', #orical',\n", " 'ssp126', 'ssp370', 'ssp585']\n", "y0_times = [1851, 1901, 1951, 1995, 2021, 2041, 2061, 2081]\n", "\n", "# get the RGI area / ITMIX volumes of the glaciers\n", "rgi_regs = []\n", "for rgi_reg in np.arange(1,20,1):\n", " if rgi_reg < 10:\n", " rgi_reg = '0'+str(rgi_reg)\n", " else:\n", " rgi_reg = str(rgi_reg)\n", " rgi_regs.append(rgi_reg)\n", "import oggm\n", "df_itmix = pd.read_hdf(oggm.utils.get_demo_file('rgi62_itmix_df.h5'))\n", "import geopandas as gpd\n", "rgidf_dict = {}\n", "for rgi_reg in rgi_regs:\n", " path_rgi = oggm.utils.get_rgi_region_file(rgi_reg, version='6')\n", " rgidf = gpd.read_file(path_rgi)\n", " #Greenland periphery : all glaciers with connectivity level 0 and 1 should be included, level 2 glaciers should be excluded (as was the case for GlacierMIP2)\n", " # total RGI area: 89,651km2\n", " if rgi_reg == '05':\n", " rgidf = rgidf.loc[(rgidf['Connect'] == 0) | (rgidf['Connect'] ==1)]\n", " rgidf_dict[rgi_reg] = rgidf\n", " rgidf_dict[rgi_reg] = 
 ] }, { "cell_type": "code", "execution_count": 4, "id": "b1cc4fa4-000b-4029-99ea-b3aabfa3f240", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "955\n" ] } ], "source": [ "# how many Greenland (region 05) glaciers does the connectivity filter drop?\n", "path_rgi = oggm.utils.get_rgi_region_file('05', version='6')\n",
"rgidf = gpd.read_file(path_rgi)\n",
"rgidf_c = rgidf.loc[(rgidf['Connect'] == 0) | (rgidf['Connect'] == 1)]\n",
"\n",
"print(len(rgidf) - len(rgidf_c))" ] }, { "cell_type": "code", "execution_count": 28, "id": "14d3ce9e-b2ec-4ebb-8183-5d945e97326d", "metadata": {}, "outputs": [], "source": [ "def get_path(folder_path='/home/www/lschuster/glacierMIP3_analysis',\n",
"             rgi_reg='01',\n",
"             model_author='Rounce',\n",
"             period='1851-1870', gcm='gfdl-esm4', ssp='hist'):\n",
"    if model_author == 'Rounce':\n",
"        path = f'/home/www/drounce/regional-filled-norunaway/{rgi_reg}/{model_author}_rgi{rgi_reg}_sum_{period}_{gcm}_{ssp}.nc'\n",
"    elif (model_author == 'OGGM') or (model_author == 'OGGM-VAS'):\n",
"        # was corrected for missing glaciers ... (by Lilian Schuster with method of Fabien)\n",
"        if ssp == 'hist':\n",
"            ssp = 'historical'\n",
"        path = f'{folder_path}/{model_author}/regional_filled/{rgi_reg}/{model_author}_rgi{rgi_reg}_sum_{period}_{gcm}_{ssp}_filled.nc'\n",
"    else:\n",
"        if gcm == 'ipsl-cm6a-lr':\n",
"            if model_author == 'Compagno':\n",
"                gcm = 'psl_cm6a-lr'  # no 'i' and '_' instead of '-'\n",
"            if model_author == 'Huss':\n",
"                # Huss also spells it differently ('_' after 'ipsl')\n",
"                gcm = 'ipsl_cm6a-lr'\n",
"        if model_author == 'Kraaijenbrink':\n",
"            if ssp == 'hist':\n",
"                ssp = 'historical'\n",
"        path = f'{folder_path}/{model_author}/{model_author}_rgi{rgi_reg}_sum_{period}_{gcm}_{ssp}.nc'\n",
"    return path\n",
"\n",
"# dummy dataset with 5001 simulation-year entries (template from region 01)\n",
"ds_nan_5001 = xr.open_dataset(get_path(rgi_reg='01'))\n",
"ds_nan_5001.volume_m3.data[...] = np.nan\n",
"ds_nan_5001.area_m2.data[...] = np.nan\n",
"\n",
"# dummy dataset with 2001 simulation-year entries (template from region 02)\n",
"ds_nan_2001 = xr.open_dataset(get_path(rgi_reg='02'))\n",
"ds_nan_2001.volume_m3.data[...] = np.nan\n",
"ds_nan_2001.area_m2.data[...] = np.nan\n",
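"\n",
"# minimal usage sketch of get_path (illustrative output path, not checked here):\n",
"# get_path(rgi_reg='11', model_author='Compagno', period='2021-2040',\n",
"#          gcm='ipsl-cm6a-lr', ssp='ssp126')\n",
"# -> '/home/www/lschuster/glacierMIP3_analysis/Compagno/Compagno_rgi11_sum_2021-2040_psl_cm6a-lr_ssp126.nc'\n",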
"\n", "# these regions should run for 5000 years:\n", "rgi_regs_5000 = ['01', '03', '04', '05', '06', '07', '09', '17', '19']" ] }, { "cell_type": "code", "execution_count": 29, "id": "3a8f927c-f17f-40e1-9813-f375a40aca22", "metadata": {}, "outputs": [], "source": [ "run = False\n", "if run:\n",
"    model_authors = ['OGGM', 'OGGM-VAS']\n",
"    l_ds_oggm = []\n",
"    for model_author in model_authors:\n",
"        missing_exp = []\n",
"        _ds_reg = []\n",
"        for rgi_reg in rgi_regs:\n",
"            # want to have the right simulation year length\n",
"            _l_period = []\n",
"            # past periods (historical scenario only)\n",
"            for y0_time in y0_times[:4]:\n",
"                period = f'{y0_time}-{y0_time+19}'\n",
"                _l_scenario = []\n",
"                for scenario in scenarios[:1]:\n",
"                    _l_gcm = []\n",
"                    for gcm in gcms:\n",
"                        path = get_path(rgi_reg=rgi_reg, model_author=model_author, period=period, gcm=gcm, ssp=scenario)\n",
"                        try:\n",
"                            if model_author == 'Compagno' or model_author == 'Huss':\n",
"                                _ds = xr.open_dataset(path)\n",
"                                # wrong netcdf shape and the simulation is 1 year too short ...\n",
"                                if len(_ds.volume_m3.squeeze()) == 5000:\n",
"                                    ds = ds_nan_5001.copy(deep=True)\n",
"                                elif len(_ds.volume_m3.squeeze()) == 2000:\n",
"                                    ds = ds_nan_2001.copy(deep=True)\n",
"                                if model_author == 'Huss':\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()\n",
"                                else:\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()*1e9  # is in km3 instead of m3\n",
"                                ds.area_m2.data[:-1] = _ds.area_m2.squeeze()\n",
"                                ds.volume_m3[-1] = ds.volume_m3[-2].values\n",
"                                ds.area_m2[-1] = ds.area_m2[-2].values\n",
"                                ds.attrs.update(_ds.attrs)\n",
"                            else:\n",
"                                ds = xr.open_dataset(path)\n",
"                        except Exception:\n",
"                            # in case of OGGM 11, went for 5000 yrs anyways\n",
"                            ds = ds_nan_5001.copy(deep=True)\n",
"\n",
"                            #if rgi_reg in rgi_regs_5000:\n",
"                            #    ds = ds_nan_5001.copy(deep=True)\n",
"                            #else:\n",
"                            #    ds = ds_nan_2001.copy(deep=True)\n",
"                            missing_exp.append(path)\n",
"\n",
"                        ds = ds.reset_coords()[['volume_m3', 'area_m2']]\n",
"                        ds = ds.expand_dims({'gcm': [gcm], 'ssp': [scenario], 'period': [period], 'rgi_reg': [rgi_reg]})\n",
"                        _l_gcm.append(ds)\n",
"                    _l_scenario.append(xr.concat(_l_gcm, dim='gcm'))\n",
"\n",
"                _l_period.append(xr.concat(_l_scenario, dim='ssp'))\n",
"            ds_past = xr.concat(_l_period, dim='period')\n",
"\n",
"            _l_period = []\n",
"            # future periods (ssp scenarios only)\n",
"            for y0_time in y0_times[4:]:\n",
"                period = f'{y0_time}-{y0_time+19}'\n",
"                _l_scenario = []\n",
"                for scenario in scenarios[1:]:\n",
"                    _l_gcm = []\n",
"                    for gcm in gcms:\n",
"                        path = get_path(rgi_reg=rgi_reg, model_author=model_author, period=period, gcm=gcm, ssp=scenario)\n",
"                        try:\n",
"                            if model_author == 'Compagno' or model_author == 'Huss':\n",
"                                _ds = xr.open_dataset(path)\n",
"                                # wrong netcdf shape and the simulation is 1 year too short ...\n",
"                                if len(_ds.volume_m3.squeeze()) == 5000:\n",
"                                    ds = ds_nan_5001.copy(deep=True)\n",
"                                elif len(_ds.volume_m3.squeeze()) == 2000:\n",
"                                    ds = ds_nan_2001.copy(deep=True)\n",
"                                if model_author == 'Huss':\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()\n",
"                                else:\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()*1e9  # is in km3 instead of m3\n",
"                                ds.area_m2.data[:-1] = _ds.area_m2.squeeze()\n",
"                                ds.volume_m3[-1] = ds.volume_m3[-2].values\n",
"                                ds.area_m2[-1] = ds.area_m2[-2].values\n",
"                                ds.attrs.update(_ds.attrs)\n",
"                            else:\n",
"                                ds = xr.open_dataset(path)\n",
"                        except Exception:\n",
"                            ds = ds_nan_5001.copy(deep=True)\n",
"\n",
"                            #if rgi_reg in rgi_regs_5000:\n",
"                            #    ds = ds_nan_5001.copy(deep=True)\n",
"                            #else:\n",
"                            #    ds = ds_nan_2001.copy(deep=True)\n",
"                            missing_exp.append(path)\n",
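"\n",
"                        # keep only the two variables of interest and tag the run with singleton\n",
"                        # gcm/ssp/period/rgi_reg dims, so the nested xr.concat calls can stack\n",
"                        # everything into one (gcm, ssp, period, rgi_reg, simulation_year) cube:\n",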
"                        ds = ds.reset_coords()[['volume_m3', 'area_m2']]\n",
"                        ds = ds.expand_dims({'gcm': [gcm], 'ssp': [scenario], 'period': [period], 'rgi_reg': [rgi_reg]})\n",
"                        _l_gcm.append(ds)\n",
"                    _l_scenario.append(xr.concat(_l_gcm, dim='gcm'))\n",
"                _l_period.append(xr.concat(_l_scenario, dim='ssp'))\n",
"            ds_future = xr.concat(_l_period, dim='period')\n",
"            _ds_reg_single = xr.concat([ds_past, ds_future], dim='ssp')\n",
"            _ds_reg.append(_ds_reg_single)\n",
"        ds = xr.concat(_ds_reg, dim='rgi_reg')\n",
"\n",
"        # extend the rgi regions that only ran until simulation year 2000 to 5000 years\n",
"        # by repeating the last simulated year\n",
"        for rgi_reg_id, rgi_reg in enumerate(rgi_regs):\n",
"            if np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).volume_m3.values)) and np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).area_m2.values)):\n",
"                # ok, we do not have any regional data for that region and model_author, just keep the np.nan values ...\n",
"                pass\n",
"            elif rgi_reg in rgi_regs_5000:\n",
"                # check that not all are nan-values!\n",
"                assert not np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).volume_m3.values))\n",
"            else:\n",
"                try:\n",
"                    # check that it is really always np.nan after simulation year 2000 for that region\n",
"                    assert np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).volume_m3.values))\n",
"                    assert np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).area_m2.values))\n",
"\n",
"                    # fill them up with the last simulation year values\n",
"                    for y in np.arange(2001, 5001):\n",
"                        ds['volume_m3'].data[..., rgi_reg_id, y] = ds.sel(rgi_reg=rgi_reg).sel(simulation_year=2000).volume_m3.values\n",
"                        ds['area_m2'].data[..., rgi_reg_id, y] = ds.sel(rgi_reg=rgi_reg).sel(simulation_year=2000).area_m2.values\n",
"                except AssertionError:\n",
"                    # ok, some models did run over all regions for 5000 years\n",
"                    print('runs for 5000 years: ', model_author, rgi_reg)\n",
"\n",
"        _ds_oggm = ds.expand_dims({'model_author': [model_author]})\n",
"        l_ds_oggm.append(_ds_oggm)\n",
"\n",
"    ds_reg_models_oggm = xr.concat(l_ds_oggm, dim='model_author')\n",
"\n",
"    # this encoding is the same for all files\n",
"    encoding = {\n",
"        'simulation_year': {\"dtype\": \"int16\"},\n",
"        'volume_m3': {\"dtype\": \"float32\"},\n",
"        'area_m2': {\"dtype\": \"float32\"},\n",
"    }\n",
"    out_path = '/home/www/lschuster/glacierMIP3_analysis/glacierMIP3_oggm_models_all_rgi_regions_sum.nc'\n",
"    ds_reg_models_oggm.to_netcdf(out_path, encoding=encoding)" ] }, { "cell_type": "code", "execution_count": 32, "id": "247bdc0d-aa01-4772-a238-6f75eb46f889", "metadata": {}, "outputs": [], "source": [ "# dummy dataset with 5001 simulation-year entries (template from region 01)\n",
"ds_nan_5001 = xr.open_dataset(get_path(rgi_reg='01'))\n",
"ds_nan_5001.volume_m3.data[...] = np.nan\n",
"ds_nan_5001.area_m2.data[...] = np.nan\n",
"\n",
"# dummy dataset with 2001 simulation-year entries (template from region 02)\n",
"ds_nan_2001 = xr.open_dataset(get_path(rgi_reg='02'))\n",
"ds_nan_2001.volume_m3.data[...] = np.nan\n",
"ds_nan_2001.area_m2.data[...] = np.nan\n",
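"\n",
"# the NaN-filled templates above serve as placeholders whenever an experiment file is\n",
"# missing, so every (gcm, ssp, period, region) combination keeps a consistent shape\n",
"# when the datasets are concatenated below\n",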
"\n", "# these regions should run for 5000 years:\n", "rgi_regs_5000 = ['01', '03', '04', '05', '06', '07', '09', '17', '19']\n",
"\n",
"model_authors = ['Huss', 'Rounce', 'Compagno', 'Kraaijenbrink', 'GLIMB']\n",
"\n",
"### regional (uncorrected, except for Rounce)\n",
"missing_exp = []\n",
"run = False  # True\n",
"if run:\n",
"    ds_m = []\n",
"    for model_author in model_authors:\n",
"        _ds_reg = []\n",
"        for rgi_reg in rgi_regs:\n",
"            # want to have the right simulation year length\n",
"            _l_period = []\n",
"            # past periods (historical scenario only)\n",
"            for y0_time in y0_times[:4]:\n",
"                period = f'{y0_time}-{y0_time+19}'\n",
"                _l_scenario = []\n",
"                for scenario in scenarios[:1]:\n",
"                    _l_gcm = []\n",
"                    for gcm in gcms:\n",
"                        path = get_path(rgi_reg=rgi_reg, model_author=model_author, period=period, gcm=gcm, ssp=scenario)\n",
"                        try:\n",
"                            if model_author == 'Compagno' or model_author == 'Huss':\n",
"                                _ds = xr.open_dataset(path)\n",
"                                # wrong netcdf shape and the simulation is 1 year too short ...\n",
"                                if len(_ds.volume_m3.squeeze()) == 5000:\n",
"                                    ds = ds_nan_5001.copy(deep=True)\n",
"                                elif len(_ds.volume_m3.squeeze()) == 2000:\n",
"                                    ds = ds_nan_2001.copy(deep=True)\n",
"                                if model_author == 'Huss':\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()\n",
"                                else:\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()*1e9  # is in km3 instead of m3\n",
"                                ds.area_m2.data[:-1] = _ds.area_m2.squeeze()\n",
"                                ds.volume_m3[-1] = ds.volume_m3[-2].values\n",
"                                ds.area_m2[-1] = ds.area_m2[-2].values\n",
"                                ds.attrs.update(_ds.attrs)\n",
"                            else:\n",
"                                ds = xr.open_dataset(path)\n",
"                        except Exception:\n",
"                            if rgi_reg in rgi_regs_5000:\n",
"                                ds = ds_nan_5001.copy(deep=True)\n",
"                            else:\n",
"                                ds = ds_nan_2001.copy(deep=True)\n",
"                            missing_exp.append(path)\n",
"\n",
"                        ds = ds.expand_dims({'gcm': [gcm], 'ssp': [scenario], 'period': [period], 'rgi_reg': [rgi_reg]})\n",
"                        _l_gcm.append(ds)\n",
"                    _l_scenario.append(xr.concat(_l_gcm, dim='gcm'))\n",
"\n",
"                _l_period.append(xr.concat(_l_scenario, dim='ssp'))\n",
"            ds_past = xr.concat(_l_period, dim='period')\n",
"\n",
"            _l_period = []\n",
"            # future periods (ssp scenarios only)\n",
"            for y0_time in y0_times[4:]:\n",
"                period = f'{y0_time}-{y0_time+19}'\n",
"                _l_scenario = []\n",
"                for scenario in scenarios[1:]:\n",
"                    _l_gcm = []\n",
"                    for gcm in gcms:\n",
"                        path = get_path(rgi_reg=rgi_reg, model_author=model_author, period=period, gcm=gcm, ssp=scenario)\n",
"                        try:\n",
"                            if model_author == 'Compagno' or model_author == 'Huss':\n",
"                                _ds = xr.open_dataset(path)\n",
"                                # wrong netcdf shape and the simulation is 1 year too short ...\n",
"                                if len(_ds.volume_m3.squeeze()) == 5000:\n",
"                                    ds = ds_nan_5001.copy(deep=True)\n",
"                                elif len(_ds.volume_m3.squeeze()) == 2000:\n",
"                                    ds = ds_nan_2001.copy(deep=True)\n",
"                                if model_author == 'Huss':\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()\n",
"                                else:\n",
"                                    ds.volume_m3.data[:-1] = _ds.volume_m3.squeeze()*1e9  # is in km3 instead of m3\n",
"                                ds.area_m2.data[:-1] = _ds.area_m2.squeeze()\n",
"                                ds.volume_m3[-1] = ds.volume_m3[-2].values\n",
"                                ds.area_m2[-1] = ds.area_m2[-2].values\n",
"                                ds.attrs.update(_ds.attrs)\n",
"                            else:\n",
"                                ds = xr.open_dataset(path)\n",
"                        except Exception:\n",
"                            if rgi_reg in rgi_regs_5000:\n",
"                                ds = ds_nan_5001.copy(deep=True)\n",
"                            else:\n",
"                                ds = ds_nan_2001.copy(deep=True)\n",
"                            missing_exp.append(path)\n",
"                        ds = ds.expand_dims({'gcm': [gcm], 'ssp': [scenario], 'period': [period], 'rgi_reg': [rgi_reg]})\n",
"                        _l_gcm.append(ds)\n",
"                    _l_scenario.append(xr.concat(_l_gcm, dim='gcm'))\n",
"                _l_period.append(xr.concat(_l_scenario, dim='ssp'))\n",
"            ds_future = xr.concat(_l_period, dim='period')\n",
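"\n",
"            # concatenating the hist block (past periods) with the ssp126/370/585 blocks\n",
"            # (future periods) along 'ssp' leaves NaNs for never-simulated combinations:\n",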
"            _ds_reg_single = xr.concat([ds_past, ds_future], dim='ssp')\n",
"            _ds_reg.append(_ds_reg_single)\n",
"\n",
"        ds = xr.concat(_ds_reg, dim='rgi_reg')\n",
"\n",
"        # extend the rgi regions that only ran until simulation year 2000 to 5000 years\n",
"        # by repeating the last simulated year\n",
"        for rgi_reg_id, rgi_reg in enumerate(rgi_regs):\n",
"            if np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).volume_m3.values)) and np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).area_m2.values)):\n",
"                # ok, we do not have any regional data for that region and model_author, just keep the np.nan values ...\n",
"                pass\n",
"            elif rgi_reg in rgi_regs_5000:\n",
"                # check that not all are nan-values!\n",
"                assert not np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).volume_m3.values))\n",
"            else:\n",
"                try:\n",
"                    # check that it is really always np.nan after simulation year 2000 for that region\n",
"                    assert np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).volume_m3.values))\n",
"                    assert np.all(np.isnan(ds.sel(rgi_reg=rgi_reg).sel(simulation_year=slice(2001, 5001)).area_m2.values))\n",
"\n",
"                    # fill them up with the last simulation year values\n",
"                    for y in np.arange(2001, 5001):\n",
"                        ds['volume_m3'].data[..., rgi_reg_id, y] = ds.sel(rgi_reg=rgi_reg).sel(simulation_year=2000).volume_m3.values\n",
"                        ds['area_m2'].data[..., rgi_reg_id, y] = ds.sel(rgi_reg=rgi_reg).sel(simulation_year=2000).area_m2.values\n",
"                except AssertionError:\n",
"                    # ok, some models did run over all regions for 5000 years\n",
"                    print('runs for 5000 years: ', model_author, rgi_reg)\n",
"\n",
"        ds = ds.expand_dims({'model_author': [model_author]})\n",
"        ds_m.append(ds)\n",
"\n",
"    ds_reg_models = xr.concat(ds_m, dim='model_author', combine_attrs='drop')\n",
"    out_path = '/home/www/lschuster/glacierMIP3_analysis/glacierMIP3_5models_all_rgi_regions_sum_no_run_away.nc'\n",
"\n",
"    ### also open Dave's raw data\n",
"    # Dave's old data\n",
"    #ds_rounce_unfilled = xr.open_dataset('/home/www/lschuster/glacierMIP3_analysis/glacierMIP3_Rounce_all_rgi_regions_sum.nc')\n",
"    #ds_rounce_unfilled.coords['model_author'].data[...] = 'Rounce_unfilled'\n",
"    #ds_reg_models = xr.concat([ds_rounce_unfilled, ds_reg_models], dim='model_author')\n",
"\n",
"    # this encoding is the same for all files\n",
"    encoding = {\n",
"        'simulation_year': {\"dtype\": \"int16\"},\n",
"        'volume_m3': {\"dtype\": \"float32\"},\n",
"        'area_m2': {\"dtype\": \"float32\"},\n",
"    }\n",
"    ds_reg_models.to_netcdf(out_path, encoding=encoding)\n",
"else:\n",
"    out_path = '/home/www/lschuster/glacierMIP3_analysis/glacierMIP3_5models_all_rgi_regions_sum_no_run_away.nc'\n",
"\n",
"    ds_reg_models = xr.open_dataset(out_path)\n",
"    out_path_oggm = '/home/www/lschuster/glacierMIP3_analysis/glacierMIP3_oggm_models_all_rgi_regions_sum.nc'\n",
"    ds_reg_models_oggm_versions = xr.open_dataset(out_path_oggm)\n",
"    ds_reg_models = xr.concat([ds_reg_models, ds_reg_models_oggm_versions], dim='model_author')\n",
"\n",
"# actually Fabi said I should rather have such a coordinate ... at some point I should change that ...\n",
"#ds_reg_models.coords['ssp_period'] = ds_reg_models.period + '_' + ds_reg_models.ssp" ] }, { "cell_type": "code", "execution_count": 33, "id": "7512df9e-c1b1-4021-add4-1789e2005d67", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
<xarray.Dataset>\n",
"Dimensions: (model_author: 7, gcm: 5, period: 8,\n",
" simulation_year: 5001, ssp: 4, rgi_reg: 19)\n",
"Coordinates:\n",
" * model_author (model_author) object 'Huss' 'Rounce' ... 'OGGM' 'OGGM-VAS'\n",
" * gcm (gcm) object 'gfdl-esm4' 'ipsl-cm6a-lr' ... 'ukesm1-0-ll'\n",
" * period (period) object '1851-1870' '1901-1920' ... '2081-2100'\n",
" * simulation_year (simulation_year) int16 0 1 2 3 4 ... 4997 4998 4999 5000\n",
" * ssp (ssp) object 'hist' 'ssp126' 'ssp370' 'ssp585'\n",
" * rgi_reg (rgi_reg) object '01' '02' '03' '04' ... '17' '18' '19'\n",
"Data variables:\n",
" volume_m3 (model_author, gcm, ssp, period, rgi_reg, simulation_year) float32 ...\n",
" area_m2 (model_author, gcm, ssp, period, rgi_reg, simulation_year) float32 ...<xarray.DataArray 'model_author' (model_author: 7)>\n",
"array(['Huss', 'Rounce', 'Compagno', 'Kraaijenbrink', 'GLIMB', 'OGGM',\n",
" 'OGGM-VAS'], dtype=object)\n",
"Coordinates:\n",
" * model_author (model_author) object 'Huss' 'Rounce' ... 'OGGM' 'OGGM-VAS'