diff --git a/__init__.py b/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data/fieldlist_NCAR.jsonc b/data/fieldlist_CESM.jsonc similarity index 92% rename from data/fieldlist_NCAR.jsonc rename to data/fieldlist_CESM.jsonc index a51014815..fae2c8ed6 100644 --- a/data/fieldlist_NCAR.jsonc +++ b/data/fieldlist_CESM.jsonc @@ -5,7 +5,7 @@ // Source: https://www.cesm.ucar.edu/models/cesm2/atmosphere/docs/ug6/hist_flds_f2000.html // CF variables not on that list are commented out { - "name" : "NCAR", + "name" : "CESM", "models": ["CAM4", "CESM2", "CESM"], // others? "coords" : { // only used for taking slices, unit conversion @@ -13,6 +13,7 @@ "lat": {"axis": "Y", "standard_name": "latitude", "units": "degrees_north"}, "TLONG": {"axis": "X", "standard_name": "array of t-grid longitudes", "units": "degrees_east"}, "TLAT": {"axis": "Y", "standard_name": "array of t-grid latitudes", "units": "degrees_north"}, + "plev": { "standard_name": "air_pressure", "units": "hPa", @@ -48,12 +49,14 @@ "variables" : { "U": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "U{value}"}, "ndim": 4 }, "V": { "standard_name": "northward_wind", + "realm":"atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "V{value}"}, "ndim": 4 @@ -61,67 +64,79 @@ "Z3": { "standard_name": "geopotential_height", "units": "m", + "realm": "atmos", // note: 4d name is 'Z3' but Z500 = height at 500 mb, etc. "scalar_coord_templates": {"plev": "Z{value}"}, "ndim": 4 }, "Z500": { "standard_name": "geopotential_height_500mb", + "realm": "atmos", "units": "m", // note: 4d name is 'Z3' but Z500 = height at 500 mb, etc. "ndim": 3 }, "Q": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "OMEGA": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "scalar_coord_templates": {"plev": "OMEGA{value}"}, "ndim": 4 }, "TS": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "PS": { "standard_name": "surface_air_pressure", + "realm": "atmos", // note: not hPa "units": "Pa", "ndim": 3 }, "PRECT": { "standard_name": "precipitation_rate", + "realm": "atmos", "units": "m s-1", "ndim": 3 }, "PRECC": { "standard_name": "convective_precipitation_rate", + "realm": "atmos", "units": "m s-1", "ndim": 3 }, "TREFHT" : { // correct name? CMIP6 equivalent should be tas, temp at 2m ref height "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "TAUX": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "TAUY": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "PSL": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", "ndim": 3 }, @@ -149,21 +164,25 @@ "FLUS": { // correct name? Guessed according to pattern -- no FLUS, only the net combination? 
"standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLDS": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLUT": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLNT": { "standard_name": "net_upward_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -229,32 +248,37 @@ }, "SHFLX": { "standard_name": "surface_upward_sensible_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "LHFLX": { "standard_name": "surface_upward_latent_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "zos": { "standard_name": "sea_surface_height_above_geoid", + "realm": "ocean", "units": "m", "ndim": 3 }, "tauuo": { "standard_name": "surface_downward_x_stress", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "tauvo": { "standard_name": "surface_downward_y_stress", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "areacello": { "standard_name": "cell_area", - "modifier": "ocean_realm", + "realm": "ocean", "units": "m2", "ndim": 2 }, @@ -281,11 +305,13 @@ // }, "THETAL": { "standard_name": "sea_water_potential_temperature", + "realm": "ocean", "units": "K", "ndim": 4 }, "SST": { "standard_name": "Potential Temperature", + "realm": "ocean", "units": "degC", "ndim": 4 }, @@ -293,6 +319,7 @@ // ta: 3D temperature, units = K: "T": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 4 }, @@ -300,6 +327,7 @@ "prw": { // check name: is this column integrated? "standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 } diff --git a/data/fieldlist_CMIP.jsonc b/data/fieldlist_CMIP.jsonc index 2214efb2b..a38ac77f2 100644 --- a/data/fieldlist_CMIP.jsonc +++ b/data/fieldlist_CMIP.jsonc @@ -3,33 +3,17 @@ // in strings, so you'll want to turn word wrap on in your editor. 
// { - "name" : "CMIP", + "name": "CMIP", "models": ["CMIP_GFDL"], - "coords" : { - // only used for taking slices, unit conversion - "lon": {"axis": "X", "standard_name": "longitude", "units": "degrees_east"}, - "lat": {"axis": "Y", "standard_name": "latitude", "units": "degrees_north"}, - "time": {"axis": "T", "standard_name": "time", "units": "days"}, - // eventually want contents of CMIP6.coordinate.json + "coords": { "plev": { "standard_name": "air_pressure", "units": "hPa", "positive": "down", "axis": "Z" }, - "standard_hybrid_sigma": { - "standard_name": "atmosphere_hybrid_sigma_pressure_coordinate", - "units": "1", - "axis": "Z", - "positive": "down" - }, - "lev": { - "standard_name": "depth", - "units": "m", - "positive": "down", - "axis": "Z" - } - }, + "$ref": "./cmip6-cmor-tables/Tables/CMIP6_coordinate.json" + }, "aux_coords": { "deptho": { "standard_name": "sea_floor_depth_below_geoid", @@ -45,6 +29,7 @@ "variables" : { "ua": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "ua{value}"}, "ndim": 4 @@ -57,99 +42,115 @@ }, "va": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "va{value}"}, "ndim": 4 }, "wind_speed": { "standard_name": "wind_speed", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "wind_speed{value}"}, "ndim": 4 }, "zg": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "scalar_coord_templates": {"plev": "zg{value}"}, "ndim": 4 }, "hus": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "wap": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "scalar_coord_templates": {"plev": "wap{value}"}, "ndim": 4 }, "o3": { "standard_name": "mole_fraction_of_ozone_in_air", + "realm": "atmos", "units": "mol mol-1", "ndim": 4 }, "ts": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "huss": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 3, "modifier": "atmos_height" }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "prc": { "standard_name": "convective_precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "tp": { "standard_name": "total_precipitation", + "realm": "atmos", "units": "kg m-2", "ndim": 3 }, "tas" : { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "tauu": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "tauv": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "areacello": { "standard_name": "cell_area", - "modifier" : "ocean_realm", + "realm": "ocean", "units": "m2", "ndim": 2 }, "areacella": { "standard_name": "cell_area", + "realm": "atmos", "units": "m2", - "modifier" : "atmos_realm", "ndim": 2 }, "ps": { "standard_name": "surface_air_pressure", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "psl": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", "ndim": 3 }, @@ -160,6 +161,7 @@ }, "sfcWind": { "standard_name": "wind_speed", + "realm": "atmos", "units": "m s-1", "modifier": "atmos_height", "ndim": 3 @@ -172,31 +174,37 @@ // radiative fluxes: "rsus": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", 
"ndim": 3 }, "rsds": { "standard_name": "surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsdt": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsut": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlus": { "standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlds": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -218,67 +226,80 @@ }, "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsdscs": { "standard_name": "surface_downwelling_shortwave_flux_in_air_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsuscs": { "standard_name": "surface_upwelling_shortwave_flux_in_air_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlutcs": { "standard_name": "toa_outgoing_longwave_flux_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsutcs": { "standard_name": "toa_outgoing_shortwave_flux_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "hfss": { "standard_name": "surface_upward_sensible_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "hfls": { "standard_name": "surface_upward_latent_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, // Variables for AMOC_3D_Structure module: "uo": { "standard_name": "sea_water_x_velocity", + "realm": "ocean", "units": "m s-1", "ndim": 4 }, "vo": { "standard_name": "sea_water_y_velocity", + "realm": "ocean", "units": "m s-1", "ndim": 4 }, "so": { "standard_name": "sea_water_salinity", + "realm": "ocean", "units": "psu", "ndim": 4 }, "umo": { "standard_name": "ocean_mass_x_transport", + "realm": "ocean", "units": "kg s-1", "ndim": 4 }, "vmo": { "standard_name": "ocean_mass_y_transport", + "realm": "ocean", "units": "kg s-1", "ndim": 4 }, "thetao": { "standard_name": "sea_water_potential_temperature", + "realm": "ocean", "units": "degC", "ndim": 4 }, @@ -286,6 +307,7 @@ // ta: 3D temperature, units = K: "ta": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "scalar_coord_templates" : {"plev": "ta{value}"}, "ndim": 4 @@ -293,38 +315,45 @@ // prw: Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) "prw": { "standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 }, // Variables for SM_ET_coupling module "mrsos": { "standard_name": "mass_content_of_water_in_soil_layer", + "realm": "land", "units": "kg m-2", "ndim": 3 }, "evspsbl": { "standard_name": "water_evapotranspiration_flux", + "realm": "land", "units": "kg m-2 s-1", "ndim": 3 }, // Ice-Ocean variables "siconc": { "standard_name": "sea_ice_area_fraction", + "realm": "seaIce", "units": "%", "ndim": 3 }, "tauuo": { "standard_name": "downward_x_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "tauvo": { "standard_name": "downward_y_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "zos": { "standard_name": "sea_surface_height_above_geoid", + "realm": "ocean", "units": "m", "ndim": 3 } diff --git a/data/fieldlist_GFDL.jsonc b/data/fieldlist_GFDL.jsonc index e13a994d2..980512bc5 100644 --- a/data/fieldlist_GFDL.jsonc +++ b/data/fieldlist_GFDL.jsonc @@ -47,29 +47,34 @@ "variables" : { "ucomp": { "standard_name": 
"eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "u{value}"}, "ndim": 4 }, "vcomp": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "v{value}"}, "ndim": 4 }, "hght": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "scalar_coord_templates": {"plev": "hght{value}"}, "ndim": 4 }, "sphum": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "omega": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", // need to verify "scalar_coord_templates": {"plev": "omega{value}"}, "ndim": 4 @@ -77,79 +82,94 @@ "t_surf": { // "skin temperature", analogue of ts "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "precip": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "prec_conv": { "standard_name": "convective_precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", // need to verify "ndim": 3 }, "t_ref" : { // CMIP6 equivalent = tas, temp at 2m ref height "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "ps": { "standard_name": "surface_air_pressure", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "tau_x": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "tau_y": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "slp": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, // radiative fluxes: "swup_sfc": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swdn_sfc": { "standard_name": "surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swdn_toa": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swup_toa": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "lwup_sfc": { "standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "lwdn_sfc": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "olr": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -256,6 +276,7 @@ // }, "salt": { "standard_name": "sea_water_salinity", + "realm": "ocean", "units": "psu", "ndim": 4 }, @@ -275,12 +296,14 @@ // Variables for Convective Transition Diagnostics module: "temp": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 4 }, "WVP": { // column integral; over the whole column? 
"standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 } diff --git a/data/modifiers.jsonc b/data/modifiers.jsonc index 51434c9a0..c65add153 100644 --- a/data/modifiers.jsonc +++ b/data/modifiers.jsonc @@ -5,11 +5,5 @@ { "atmos_height" : { "description" : "atmospheric height above the ground (relative to orography) in meters" - }, - "atmos_realm" : { - "description" : "designator for a variable belonging to the atmosphere realm that shares a standard_name with a variable in a different realm" - }, - "ocean_realm" : { - "description" : "designator for a variable belonging to the ocean realm that shares a standard_name with a variable in a different realm" } } diff --git a/diagnostics/ENSO_MSE/ENSO_MSE.html b/diagnostics/ENSO_MSE/ENSO_MSE.html index 796b05a29..66f6c8c6b 100644 --- a/diagnostics/ENSO_MSE/ENSO_MSE.html +++ b/diagnostics/ENSO_MSE/ENSO_MSE.html @@ -6,7 +6,6 @@
This POD package consists of four levels. With a focus on identifying leading processes that determine ENSO related precipitation anomalies, the main module of the POD estimates
@@ -21,9 +20,10 @@
Documentation and Contact Information
-ENSO moist static energy variability diagnostics
+<font color=navy>
-RESULTS: SCATTER PLOTS MSE TERMS versus PRECIPITATION
+<font color="#000080">RESULTS: SCATTER PLOTS MSE TERMS versus PRECIPITATION
-Domain: Central Pacific
+<font color="#000080">Domain: Central Pacific</font>
- Precipitation versus
+<font color="#000080">Precipitation versus</font> |
- graphics
+<font color="#000080">graphics |
- Horizontal moist advection
+<font color="#000080">Horizontal moist advection |
-
+ |
- Net Radiative Flux (Frad)
+<font color="#000080">Net Radiative Flux (Frad) |
-
+ |
- Vertical Advection of Moist Static Energy
+<font color="#000080">Vertical Advection of Moist Static Energy |
-
+ |
- Total heat flux THF
+<font color="#000080">Total heat flux THF |
-
+ |
-Domain: Eastern Pacific
+<font color="#000080">Domain: Eastern Pacific
- Precipitation versus
+<font color="#000080">Precipitation versus |
- graphics
+<font color="#000080">graphics |
- Horizontal moist advection
+<font color="#000080">Horizontal moist advection |
-
+ |
- Net Radiative Flux (Frad)
+<font color="#000080">Net Radiative Flux (Frad) |
-
+ |
- Vertical Advection of Moist Static Energy
+<font color="#000080">Vertical Advection of Moist Static Energy |
-
+ |
- Total heat flux THF
+<font color="#000080">Total heat flux THF |
-
+ |
-ENSO Rossby wave diagnostics
+<font color=navy>
-
-RESULTS: LEVEL 05
+<font color="#000080">RESULTS: LEVEL 05</font>
+
- El Nino
+<font color="#000080">El Nino |
- Positive precipitation anomaly vs divergence anomaly
+<font color="#000080">Positive precipitation anomaly vs divergence anomaly |
-
+ |
- Positive precipitation anomaly vs RWS 1
+<font color="#000080">Positive precipitation anomaly vs RWS 1 |
-
+ |
- Positive precipitation anomaly vs RWS 2
+<font color="#000080">Positive precipitation anomaly vs RWS 2</font> |
-
+ |
-Diagnostics of the Madden-Julian Oscillation
+<font color=navy> Diagnostics of the Madden-Julian Oscillation
@@ -55,5 +51,4 @@
MJO Propagation and Amplitude Diagnostic (Jiang, UCLA) | MJO amplitude vs Convective moisture adjustment time-scale | plot
This module computes many of the diagnostics described by the WGNE - MJO Task Force and developed by Dennis Shea for observational data. Using daily + MJO Task Force and developed + by Dennis Shea for observational data. Using daily precipitation, outgoing longwave radiation, zonal wind at 850 and 200 hPa and meridional wind at 200 hPa, the module computes anomalies, bandpass-filters for the 20-100 day period, calculates the MJO Index as diff --git a/diagnostics/MJO_suite/MJO_suite.py b/diagnostics/MJO_suite/MJO_suite.py index b02fcb0be..ada67005f 100644 --- a/diagnostics/MJO_suite/MJO_suite.py +++ b/diagnostics/MJO_suite/MJO_suite.py @@ -9,9 +9,11 @@ import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -27,13 +29,14 @@ def generate_ncl_plots(nclPlotFile): while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) - + print('WARNING', e.errno, e.strerror) return 0 -#============================================================ +# ============================================================ # Call NCL code here -#============================================================ +# ============================================================ + + if not os.path.exists(os.path.join(os.environ['DATADIR'], 'day')): os.makedirs(os.path.join(os.environ['DATADIR'], 'day')) @@ -52,7 +55,7 @@ def generate_ncl_plots(nclPlotFile): print("MJO spectra") generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_spectra.ncl") -if os.path.isfile( os.environ["WK_DIR"]+"/model/netCDF/MJO_PC_INDEX.nc"): +if os.path.isfile( os.environ["WORK_DIR"]+"/model/netCDF/MJO_PC_INDEX.nc"): print("WARNING: MJO_PC_INDEX.nc already exists. Not re-running.") else: generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_EOF_cal.ncl") diff --git a/diagnostics/MJO_suite/calc_utils.ncl b/diagnostics/MJO_suite/calc_utils.ncl index 22663ddf0..35c75b6ca 100644 --- a/diagnostics/MJO_suite/calc_utils.ncl +++ b/diagnostics/MJO_suite/calc_utils.ncl @@ -13,7 +13,7 @@ load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl" setfileoption("nc", "Format", getenv("MDTF_NC_FORMAT")) -;; Local variables for alll plot routines ;; +;; Local variables for all plot routines ;; pwks = "ps" ; Output format (ps,X11,png,gif) _FillValue = -999 ; Missing value for FLOAT variables. 
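The MJO_suite text above says the POD band-pass filters daily anomalies to the 20-100 day band before computing the MJO index. The POD performs this step in NCL; the following is a minimal Python sketch of the same filtering step, assuming daily-mean anomaly input and a Butterworth design (the POD's actual filter may differ):

    # Minimal sketch of 20-100 day band-pass filtering; assumes daily data.
    import numpy as np
    from scipy.signal import butter, filtfilt

    def bandpass_20_100(anom, fs=1.0):
        """Band-pass a daily anomaly series to 20-100 day periods (zero phase)."""
        low, high = 1.0 / 100.0, 1.0 / 20.0   # band edges in cycles per day
        b, a = butter(3, [low, high], btype="bandpass", fs=fs)
        return filtfilt(b, a, anom, axis=0)   # forward-backward: no phase shift

    # Example: five years of synthetic daily precipitation anomalies.
    t = np.arange(5 * 365)
    pr_anom = np.sin(2 * np.pi * t / 45.0) + 0.5 * np.random.randn(t.size)
    pr_mjo = bandpass_20_100(pr_anom)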
diff --git a/diagnostics/MJO_suite/daily_anom.ncl b/diagnostics/MJO_suite/daily_anom.ncl index 7803b40d6..5515f784a 100644 --- a/diagnostics/MJO_suite/daily_anom.ncl +++ b/diagnostics/MJO_suite/daily_anom.ncl @@ -16,10 +16,10 @@ time_coord = getenv("time_coord") lat_coord = getenv("lat_coord") lon_coord = getenv("lon_coord") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" datadir = getenv("DATADIR") -yr1 = stringtointeger(getenv("FIRSTYR")) -yr2 = stringtointeger(getenv("LASTYR")) +yr1 = stringtointeger(getenv("startdate")) +yr2 = stringtointeger(getenv("enddate")) start_date = yr1*10000+201 end_date = (yr2+1)*10000+101 diff --git a/diagnostics/MJO_suite/daily_netcdf.ncl b/diagnostics/MJO_suite/daily_netcdf.ncl index f14d41352..d73748968 100644 --- a/diagnostics/MJO_suite/daily_netcdf.ncl +++ b/diagnostics/MJO_suite/daily_netcdf.ncl @@ -29,7 +29,7 @@ debug_print("Starting...",routine_name,debug) casename = getenv("CASENAME") datadir = getenv("DATADIR") level = getenv("lev_coord") -wk_dir = getenv("WK_DIR")+"/model/netCDF/" +wk_dir = getenv("WORK_DIR")+"/model/netCDF/" file_u200 = getenv("U200_FILE") @@ -42,8 +42,8 @@ print("daily_netcdf.ncl reading "+file_pr+" for time coordinates.") print(" Assuming without checking that all have same time coordinates!") f = addfile(file_pr,"r") -yr1 = stringtointeger(getenv("FIRSTYR")) -yr2 = stringtointeger(getenv("LASTYR")) +yr1 = stringtointeger(getenv("startdate")) +yr2 = stringtointeger(getenv("enddate")) lat_coord = getenv("lat_coord") lon_coord = getenv("lon_coord") diff --git a/diagnostics/MJO_suite/mjo_EOF.ncl b/diagnostics/MJO_suite/mjo_EOF.ncl index eff47e947..35bd3a476 100644 --- a/diagnostics/MJO_suite/mjo_EOF.ncl +++ b/diagnostics/MJO_suite/mjo_EOF.ncl @@ -18,7 +18,7 @@ vars = (/"pr","rlut","u200","u850","v200","v850"/) casename = getenv("CASENAME") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" filename_pr = file_dir+casename+".pr.day.anom.nc" filename_rlut = file_dir+casename+".rlut.day.anom.nc" filename_u200 = file_dir+casename+".u200.day.anom.nc" diff --git a/diagnostics/MJO_suite/mjo_EOF_cal.ncl b/diagnostics/MJO_suite/mjo_EOF_cal.ncl index a72fcd3f7..f1280cbde 100644 --- a/diagnostics/MJO_suite/mjo_EOF_cal.ncl +++ b/diagnostics/MJO_suite/mjo_EOF_cal.ncl @@ -13,7 +13,7 @@ begin routine_name = "mjo_EOF_cal.ncl" casename = getenv("CASENAME") - file_dir = getenv("WK_DIR")+"/model/netCDF/" + file_dir = getenv("WORK_DIR")+"/model/netCDF/" neof = 2 diff --git a/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl b/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl index 17a9e83ef..2e6fc79af 100644 --- a/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl +++ b/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl @@ -14,7 +14,7 @@ routine_name = "mjo_lat_lat_lon.ncl" vars = (/"pr","u850"/) casename = getenv("CASENAME") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" filename_pr = file_dir+casename+".pr.day.anom.nc" filename_u850 = file_dir+casename+".u850.day.anom.nc" diff --git a/diagnostics/MJO_suite/mjo_life_cycle.ncl b/diagnostics/MJO_suite/mjo_life_cycle.ncl index 5487a508a..b21bb8dc2 100644 --- a/diagnostics/MJO_suite/mjo_life_cycle.ncl +++ b/diagnostics/MJO_suite/mjo_life_cycle.ncl @@ -14,7 +14,7 @@ begin routine_name = "mjo_life_cycle" casename = getenv("CASENAME") - file_dir = getenv("WK_DIR")+"/model/netCDF/" + file_dir = getenv("WORK_DIR")+"/model/netCDF/" latS = -20 latN = 20 diff --git a/diagnostics/MJO_suite/mjo_spectra.ncl 
b/diagnostics/MJO_suite/mjo_spectra.ncl
index 2854ecde8..fdec489cd 100644
--- a/diagnostics/MJO_suite/mjo_spectra.ncl
+++ b/diagnostics/MJO_suite/mjo_spectra.ncl
@@ -15,7 +15,7 @@
vars = (/"pr","rlut","u200","u850","v200","v850"/)
casename = getenv("CASENAME")
-file_dir = getenv("WK_DIR")+"/model/netCDF/"
+file_dir = getenv("WORK_DIR")+"/model/netCDF/"
filename_pr = file_dir+casename+".pr.day.anom.nc"
filename_rlut = file_dir+casename+".rlut.day.anom.nc"
filename_u200 = file_dir+casename+".u200.day.anom.nc"
diff --git a/diagnostics/MJO_suite/settings.jsonc b/diagnostics/MJO_suite/settings.jsonc
index aa6397413..c990440cc 100644
--- a/diagnostics/MJO_suite/settings.jsonc
+++ b/diagnostics/MJO_suite/settings.jsonc
@@ -8,7 +8,7 @@
"settings" : {
"driver" : "MJO_suite.py",
"long_name" : "MJO diagnostics suite (from AMWG variability diagnostic package)",
- "realm" : "atmos",
+ "convention" : "cesm",
"description" : "MJO CLIVAR suite (NCAR)",
"runtime_requirements": {
"python3": [],
@@ -19,8 +19,16 @@
"frequency": "day"
},
"dimensions": {
- "lat": {"standard_name": "latitude"},
- "lon": {"standard_name": "longitude"},
+ "lat": {
+ "standard_name": "latitude",
+ "units": "degrees_north",
+ "axis": "Y"
+ },
+ "lon": {
+ "standard_name": "longitude",
+ "units": "degrees_east",
+ "axis": "X"
+ },
"lev": {
"standard_name": "air_pressure",
"units": "hPa",
@@ -32,34 +41,40 @@
"varlist": {
"rlut": {
"standard_name": "toa_outgoing_longwave_flux",
+ "realm": "atmos",
"units": "W m-2",
"dimensions": ["time", "lat", "lon"]
},
"pr": {
"standard_name": "precipitation_rate",
+ "realm": "atmos",
"units": "m s-1",
"dimensions": ["time", "lat", "lon"]
},
"u200": {
"standard_name": "eastward_wind",
+ "realm": "atmos",
"units": "m s-1",
"dimensions": ["time", "lat", "lon"],
"scalar_coordinates": {"lev": 200}
},
"u850": {
"standard_name": "eastward_wind",
+ "realm": "atmos",
"units": "m s-1",
"dimensions": ["time", "lat", "lon"],
"scalar_coordinates": {"lev": 850}
},
"v200": {
"standard_name": "northward_wind",
+ "realm": "atmos",
"units": "m s-1",
"dimensions": ["time", "lat", "lon"],
"scalar_coordinates": {"lev": 200}
},
"v850": {
"standard_name": "northward_wind",
+ "realm": "atmos",
"units": "m s-1",
"dimensions": ["time", "lat", "lon"],
"scalar_coordinates": {"lev": 850}
diff --git a/diagnostics/MJO_teleconnection/MJO_teleconnection.html b/diagnostics/MJO_teleconnection/MJO_teleconnection.html
index fe59d06cf..7328516ea 100644
--- a/diagnostics/MJO_teleconnection/MJO_teleconnection.html
+++ b/diagnostics/MJO_teleconnection/MJO_teleconnection.html
@@ -13,13 +13,13 @@
-Full Documentation and Contact Information
+
+ Full Documentation and Contact Information
-
Teleconnection skills v/s Mean state skills | plot
Teleconnection pattern correlation averaged for all MJO phases (y axes) relative to the MJO E/W ratio, the dashed line indicates the observed E/W ratio, and the open circles represent the poor MJO models.
-Teleconnection pattern correlation averaged for all MJO phases (y axes) relative to the 250-hPa mean zonal wind RMS error, the plus signs show the model zonal wind RMS error over the full Pacific basin, while the filled circles indicate the longitudinal RMS error in the region of the sub- tropical jet. See text (Henderson et al. 2017 J Climate) for more detailed explanations.
+<font color=black> Teleconnection pattern correlation averaged for all MJO phases
+ (y axes) relative to the 250-hPa mean zonal wind RMS error,
+ the plus signs show the model zonal wind RMS error over the full Pacific basin, while the filled
+ circles indicate the longitudinal RMS error in the region of the subtropical jet.
+ See text (Henderson et al. 2017 J Climate) for more detailed explanations.
+
-MJO phase 1 pentad composites of anomalous 250-hPa geopotential height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. The color shading shows the anomalous tropical precipitation composite during MJO phase 1.
+<font color=black> MJO phase 1 pentad composites of anomalous 250-hPa geopotential height,
+ where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
+ phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
+ phase 1.
+
-MJO phase 3 pentad composites of anomalous 250-hPa geopotential height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. The color shading shows the anomalous tropical precipitation composite during MJO phase 3.
+<font color=black> MJO phase 3 pentad composites of anomalous 250-hPa geopotential height,
+ where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
+ phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
+ phase 3.
+
-MJO phase 4 pentad composites of anomalous 250-hPa geopotential height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. The color shading shows the anomalous tropical precipitation composite during MJO phase 4.
+<font color=black> MJO phase 4 pentad composites of anomalous 250-hPa geopotential
+ height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following
+ an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero
+ contour is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
+ phase 4.
+
-MJO phase 7 pentad composites of anomalous 250-hPa geopotential height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. The color shading shows the anomalous tropical precipitation composite during MJO phase 7.
+<font color=black> MJO phase 7 pentad composites of anomalous 250-hPa geopotential height,
+ where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
+ phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
+ phase 7.
+
-MJO phase 8 pentad composites of anomalous 250-hPa geopotential height, where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO phase. Positive geopotential height anomalies are in red solid contours, and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. The color shading shows the anomalous tropical precipitation composite during MJO phase 8.
+<font color=black> MJO phase 8 pentad composites of anomalous 250-hPa geopotential height,
+ where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4
+ following an MJO phase. Positive geopotential height anomalies are in red solid contours,
+ and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero
+ contour is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO phase 8.
+
-This module computes the correlation between surface soil moisture (SM; top-10CM) and evapotranspiration (ET), at the interannual time scale, using summertime-mean values (JJA in the Northern Hemisphere, DJF in the Southern Hemisphere). Positive correlations indicate that SM controls ET variations. Negative correlations indicate that ET is ernergy-limited (by radiation and temperature).
-The degree of coupling between SM and ET depends on how wet the local climate is; thus differences in precipitation between model and observations (independent, in first approximation, from the surface) induce differences in SM-ET coupling. Across CMIP5 models, the degree of SM-ET coupling is closely correlated with the amount of summertime rainfall; using this relationship (see Berg and Sheffield 2018, Figure 3), we correct the estimate of SM-ET coupling for the model by accounting for differences in summertime precipitation. In other words, the Pr-corrected estimate of SM-ET coupling is an estimate of the coupling that would be if precipitation in the model was equal to observed rainfall.
+ This module computes the correlation between surface soil moisture (SM; top 10 cm) and evapotranspiration (ET),
+ at the interannual time scale, using summertime-mean values (JJA in the Northern Hemisphere,
+ DJF in the Southern Hemisphere). Positive correlations indicate that SM controls ET variations.
+ Negative correlations indicate that ET is energy-limited (by radiation and temperature).
+ The degree of coupling between SM and ET depends on how wet the local climate is;
+ thus differences in precipitation between model and observations (independent, in first approximation,
+ from the surface) induce differences in SM-ET coupling. Across CMIP5 models, the degree of SM-ET coupling
+ is closely correlated with the amount of summertime rainfall; using this relationship
+ (see Berg and Sheffield 2018, Figure 3), we correct the estimate of SM-ET coupling for the model by accounting
+ for differences in summertime precipitation. In other words, the Pr-corrected estimate of SM-ET coupling is an
+ estimate of the coupling that would be obtained if precipitation in the model equaled observed rainfall.
-Full Documentation and Contact Information
+
+ Full Documentation and Contact Information
-Soil Moisture-EvapoTranspiration coupling
+<font color=navy> Soil Moisture-EvapoTranspiration coupling
| {{CASENAME}} | OBS (GLEAM) | Model-OBS
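The SM_ET_coupling description above defines the coupling metric as the interannual correlation of summertime-mean SM (mrsos) and ET (evspsbl). The POD computes this in R; below is a minimal Python/xarray sketch of the Northern Hemisphere (JJA) case only, as an illustration (the DJF branch for the Southern Hemisphere is omitted):

    # Minimal sketch: interannual correlation of JJA-mean SM and ET per grid cell.
    import xarray as xr

    def sm_et_coupling(mrsos: xr.DataArray, evspsbl: xr.DataArray) -> xr.DataArray:
        # "QS-DEC" quarters start in Dec/Mar/Jun/Sep, so the June-start quarter
        # is the JJA mean of each year.
        sm_jja = mrsos.resample(time="QS-DEC").mean()
        sm_jja = sm_jja.sel(time=sm_jja.time.dt.month == 6)
        et_jja = evspsbl.resample(time="QS-DEC").mean()
        et_jja = et_jja.sel(time=et_jja.time.dt.month == 6)
        # Pearson correlation across years at each grid point.
        return xr.corr(sm_jja, et_jja, dim="time")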
diff --git a/diagnostics/SM_ET_coupling/SM_ET_coupling.py b/diagnostics/SM_ET_coupling/SM_ET_coupling.py
index 7af1e0dfe..5d27aadc6 100644
--- a/diagnostics/SM_ET_coupling/SM_ET_coupling.py
+++ b/diagnostics/SM_ET_coupling/SM_ET_coupling.py
@@ -1,6 +1,6 @@
# This file is part of the SM_ET_coupling module of the MDTF code package (see LICENSE.txt)
-#============================================================
+# ============================================================
# Coupling between soil moisture (SM) and evapotanspiration (ET) in summer
# Sample code to call R from python
# Code written by Alexis Berg
@@ -8,16 +8,19 @@
# This module calculates the correlations between SM and ET, as in Berg and Sheffield (2018), Fig.1a.
#
# Reference:
-# Berg and Sheffield (2018), Soil moisture-evapotranspiration coupling in CMIP5 models: relationship with simulated climate and projections, Journal of Climate, 31(12), 4865-4878.
-#============================================================
+# Berg and Sheffield (2018), Soil moisture-evapotranspiration coupling in CMIP5 models:
+# relationship with simulated climate and projections, Journal of Climate, 31(12), 4865-4878.
+# ============================================================
import os
import subprocess
import time
-#============================================================
+# ============================================================
# generate_ncl_plots - call a nclPlotFile via subprocess call
-#============================================================
+# ============================================================
+
+
def generate_R_plots(RPlotFile):
"""generate_plots_call - call a RPlotFile via subprocess call
@@ -28,17 +31,17 @@ def generate_R_plots(RPlotFile):
# don't exit if it does not exists just print a warning.
try:
pipe = subprocess.Popen([
- 'Rscript --verbose --vanilla {}'.format(RPlotFile)
- ] , shell=True, stdout=subprocess.PIPE)
+ 'Rscript --verbose --vanilla {}'.format(RPlotFile)], shell=True, stdout=subprocess.PIPE)
output = pipe.communicate()[0].decode()
- print('R routine {0} \n {1}'.format(RPlotFile,output))
+ print('R routine {0} \n {1}'.format(RPlotFile, output))
while pipe.poll() is None:
time.sleep(0.5)
except OSError as e:
- print('WARNING',e.errno,e.strerror)
+ print('WARNING', e.errno, e.strerror)
return 0
+
if os.path.isfile(os.environ["MRSOS_FILE"]):
print("monthly soil moisture file found")
@@ -47,16 +50,12 @@ def generate_R_plots(RPlotFile):
print("computing SM-ET coupling")
-
-#============================================================
+# ============================================================
# Call R code here
-#============================================================
+# ============================================================
print("--------- Starting SM_ET coupling generate figures (using R)----------------------------")
- if ( True ):
+ if True:
generate_R_plots(os.environ["POD_HOME"]+"/SM_ET_coupling.R")
- else:
- print("WARNING: For testing purposes, skipping SM_ET coupling figure generation")
-
print("--------- Finished SM_ET coupling generate figures----------------------------")
else:
diff --git a/diagnostics/SM_ET_coupling/settings.jsonc b/diagnostics/SM_ET_coupling/settings.jsonc
index ce1a2f101..2850b3ff4 100644
--- a/diagnostics/SM_ET_coupling/settings.jsonc
+++ b/diagnostics/SM_ET_coupling/settings.jsonc
@@ -9,6 +9,7 @@
"driver" : "SM_ET_coupling.py",
"long_name" : "Coupling between Soil Moisture and EvapoTranspiration",
"realm" : ["atmos", "land"],
+ "convention" : "cmip",
"description" : "Coupling of Soil Moisture with Evapotranspiration",
"runtime_requirements": {
"python3": [],
@@ -19,23 +20,34 @@
"frequency": "mon"
},
"dimensions": {
- "lat": {"standard_name": "latitude"},
- "lon": {"standard_name": "longitude"},
+ "lat": {
+ "standard_name": "latitude",
+ "units": "degrees_north",
+ "axis": "Y"
+ },
+ "lon": {
+ "standard_name": "longitude",
+ "units": "degrees_east",
+ "axis": "X"
+ },
"time": {"standard_name": "time"}
},
"varlist" : {
"mrsos": {
"standard_name": "mass_content_of_water_in_soil_layer",
+ "realm" : "land",
"units": "kg m-2",
"dimensions": ["time", "lat", "lon"]
},
"evspsbl": {
"standard_name": "water_evapotranspiration_flux",
+ "realm": "land",
"units": "kg m-2 s-1",
"dimensions": ["time", "lat", "lon"]
},
"pr": {
"standard_name": "precipitation_flux",
+ "realm": "atmos",
"units": "kg m-2 s-1",
"dimensions": ["time", "lat", "lon"]
}
diff --git a/diagnostics/TC_MSE/Binning_and_compositing.py b/diagnostics/TC_MSE/Binning_and_compositing.py
index ad5db7a60..4a8fda035 100644
--- a/diagnostics/TC_MSE/Binning_and_compositing.py
+++ b/diagnostics/TC_MSE/Binning_and_compositing.py
@@ -1,54 +1,58 @@
-#Import modules
+# Import modules
import os
import numpy as np
import xarray as xr
-########## BINNING/COMPOSITING MODEL DATA #############################################################
+# BINNING/COMPOSITING MODEL DATA #############################################################
-######################## MATH FUNCTION(S) ############################################
-def boxavg(thing,lat,lon):
- coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)),(len(lon),1)))
+# MATH FUNCTION(S) ############################################
+
+
+def boxavg(thing, lat, lon):
+ coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)), (len(lon), 1)))
thing1 = thing*coslat_values
thing2 = thing1/thing1
average = np.nansum(np.nansum(thing1,0))/np.nansum(np.nansum(coslat_values*thing2,0))
return average
-#Lats/Lons
-latres = np.float(os.getenv("latres"))
-lonres = np.float(os.getenv("lonres"))
-lats = np.arange(-5,5+latres,latres)
-lons = np.arange(-5,5+lonres,lonres)
-#Gather the years that were inputted by user
-FIRST_YR = np.int(os.getenv("FIRSTYR"))
-LAST_YR = np.int(os.getenv("LASTYR"))
+# Lats/Lons
+
+
+latres = float(os.getenv("latres"))
+lonres = float(os.getenv("lonres"))
+lats = np.arange(-5, 5+latres, latres)
+lons = np.arange(-5, 5+lonres, lonres)
+# Gather the years that were inputted by user
+FIRST_YR = int(os.getenv("startdate"))
+LAST_YR = int(os.getenv("enddate"))
ds_all = []
for y in range(FIRST_YR,LAST_YR+1):
- #Open all the yearly snapshot (budget and regular variable) files
- ds_reg = xr.open_dataset(os.environ['WK_DIR']+'/model/Model_Regular_Variables_'+str(y)+'.nc')
- ds_budg = xr.open_dataset(os.environ['WK_DIR']+'/model/Model_Budget_Variables_'+str(y)+'.nc')
- #Merge the budget and regular variable files by year
+ # Open all the yearly snapshot (budget and regular variable) files
+ ds_reg = xr.open_dataset(os.environ['WORK_DIR'] + '/model/Model_Regular_Variables_' + str(y) + '.nc')
+ ds_budg = xr.open_dataset(os.environ['WORK_DIR']+'/model/Model_Budget_Variables_' + str(y) + '.nc')
+ # Merge the budget and regular variable files by year
ds_merge = xr.merge([ds_reg,ds_budg])
ds_reg.close()
ds_budg.close()
- #Get all the merged files together so that once all are collected they can be concatenated
+ # Get all the merged files together so that once all are collected they can be concatenated
ds_all.append(ds_merge)
ds_merge.close()
-#Concatenate the year files together so all variables are combined across all storms
-data = xr.concat(ds_all,dim='numstorms')
+# Concatenate the year files together so all variables are combined across all storms
+data = xr.concat(ds_all, dim='numstorms')
-#Get a list of the data variables in data to trim the data after lifetime maximum intensity (LMI)
+# Get a list of the data variables in data to trim the data after lifetime maximum intensity (LMI)
Model_vars = list(data.keys())
-#Grab the vmax variable to get the LMI itself and point of LMI for trimming to account only for intensification period
+# Grab the vmax variable to get the LMI itself and point of LMI for trimming to account only for intensification period
maxwinds = data['maxwind']
winds_list = []
-#Loop through the variables to pick out the feedbacks and add a normalized version of that variable
+# Loop through the variables to pick out the feedbacks and add a normalized version of that variable
for var in Model_vars:
- if(var[0:5]=='hanom' or var[0:10]=='hMoistanom' or var[0:10]=='hTempanom' or var[0:4]=='hvar'):
+ if var[0:5] == 'hanom' or var[0:10] == 'hMoistanom' or var[0:10] == 'hTempanom' or var[0:4] == 'hvar':
normvar = np.array(data[var])
boxavrawvar = np.array(data[var])
boxavvar = np.ones((len(maxwinds),len(maxwinds[0]))) * np.nan
@@ -56,115 +60,120 @@ def boxavg(thing,lat,lon):
for s in range(len(maxwinds)):
for t in range(len(maxwinds[s])):
hvar = np.array(data.hvar[s][t][:][:])
- boxavghvar = boxavg(hvar,np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:]))
+ boxavghvar = boxavg(hvar,np.array(data.latitude[s][t][:]), np.array(data.longitude[s][t][:]))
normvar[s][t][:][:] = normvar[s][t][:][:]/boxavghvar
- boxavvar[s][t] = boxavg(boxavrawvar[s][t][:][:],np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:]))
- boxavnormvar[s][t] = boxavg(np.array(normvar[s][t][:][:]),np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:]))
- data['norm'+var] = (['numstorms','numsteps','latlen','lonlen'],np.array(normvar[:][:][:][:]))
- data['boxav_'+var] = (['numstorms','numsteps'],np.array(boxavvar[:][:]))
- data['boxav_norm_'+var] = (['numstorms','numsteps'],np.array(boxavnormvar[:][:]))
-
-#Loop through all model storms and find the LMI, then tag each storm with its LMI for binning later
+ boxavvar[s][t] = boxavg(boxavrawvar[s][t][:][:], np.array(data.latitude[s][t][:]),
+ np.array(data.longitude[s][t][:]))
+ boxavnormvar[s][t] = boxavg(np.array(normvar[s][t][:][:]), np.array(data.latitude[s][t][:]),
+ np.array(data.longitude[s][t][:]))
+ data['norm' + var] = (['numstorms', 'numsteps', 'latlen', 'lonlen'], np.array(normvar[:][:][:][:]))
+ data['boxav_' + var] = (['numstorms', 'numsteps'], np.array(boxavvar[:][:]))
+ data['boxav_norm_' + var] = (['numstorms', 'numsteps'], np.array(boxavnormvar[:][:]))
+
+# Loop through all model storms and find the LMI, then tag each storm with its LMI for binning later
for s in range(len(maxwinds)):
windmax = float(max(maxwinds[s][:]))
- windmaxindex = np.squeeze(np.where(maxwinds[s]==windmax))
- #Check if there are more than one maximum wind speed
+ windmaxindex = np.squeeze(np.where(maxwinds[s] == windmax))
+ # Check if there is more than one maximum wind speed
if windmaxindex.size >= 2:
windmaxindex = int(windmaxindex[0])
else:
- windmaxindex = int(np.squeeze(np.where(maxwinds[s]==windmax)))
- #Loop and have all the indices after the timestep of LMI be NaN for all vars
+ windmaxindex = int(np.squeeze(np.where(maxwinds[s] == windmax)))
+ # Loop and have all the indices after the timestep of LMI be NaN for all vars
for var in Model_vars:
- data[var][s,windmaxindex+1:len(maxwinds[s])+1] = np.nan
+ data[var][s, windmaxindex+1:len(maxwinds[s])+1] = np.nan
vmax_indiv_list = []
- for t in range(0,len(maxwinds[s])):
- #First check and NaN all variables at timesteps where TC center is outside 30 N/S
- if(data.centerLat[s][t]>30 or data.centerLat[s][t]<-30):
+ for t in range(0, len(maxwinds[s])):
+ # First check and NaN all variables at timesteps where TC center is outside 30 N/S
+ if data.centerLat[s][t] > 30 or data.centerLat[s][t] < -30:
for var in Model_vars:
- if(data[var].ndim==2):
+ if data[var].ndim == 2:
data[var][s][t] = np.nan
- elif(data[var].ndim==3):
+ elif data[var].ndim == 3:
data[var][s][t][:] = np.nan
else:
data[var][s][t][:][:] = np.nan
- #Get max wind at specific step to tag the steps for binning snapshot
+ # Get max wind at specific step to tag the steps for binning snapshot
vmax_sel = maxwinds[s,t].values
- vmax = xr.full_like(data.h[s,t],float(vmax_sel)).rename('vmax')
+ vmax = xr.full_like(data.h[s, t], float(vmax_sel)).rename('vmax')
vmax_indiv_list.append(vmax)
- vmax_indiv_array = xr.concat(vmax_indiv_list,dim='numsteps')
- #Create the vmax tag variable
+ vmax_indiv_array = xr.concat(vmax_indiv_list, dim='numsteps')
+ # Create the vmax tag variable
winds_list.append(vmax_indiv_array)
-#Update Model data with the vmax tag created above
-model_winds_array = xr.concat(winds_list,dim='numstorms')
-model_updated = xr.merge([data,model_winds_array])
-#Stretch the boxav variables to 1 dimension and make a new stretched windmax variable
+# Update Model data with the vmax tag created above
+model_winds_array = xr.concat(winds_list, dim='numstorms')
+model_updated = xr.merge([data, model_winds_array])
+# Stretch the boxav variables to 1 dimension and make a new stretched windmax variable
newvars = list(model_updated.keys())
for var in newvars:
- if(var[0:5]=='boxav'):
- model_updated['new_'+var] = (['newsteps'],np.squeeze(np.reshape(np.array(model_updated[var]),(len(data.numstorms)*len(data.numsteps)))))
+ if var[0:5] == 'boxav':
+ model_updated['new_' + var] = (['newsteps'], np.squeeze(np.reshape(np.array(model_updated[var]),
+ (len(data.numstorms)*len(data.numsteps)))))
-model_updated['new_maxwind'] = (['newsteps'],np.squeeze(np.reshape(np.array(model_updated['maxwind']),(len(data.numstorms)*len(data.numsteps)))))
+model_updated['new_maxwind'] = (['newsteps'], np.squeeze(np.reshape(np.array(model_updated['maxwind']),
+ (len(data.numstorms)*len(data.numsteps)))))
-#Bin snapshots according to max wind speed bins
+# Bin snapshots according to max wind speed bins
bins = np.arange(0,66,3)
-#Set a count array to gather the sample size for each bin and all bins
+# Set a count array to gather the sample size for each bin and all bins
count_denom = len(data.latitude[0][0]) * len(data.longitude[0][0])
bins_count = np.zeros(len(bins))
vmax2 = model_updated.vmax.copy(deep=True)
onedvmax = model_updated.new_maxwind.copy(deep=True)
for b, bin in enumerate(bins):
upperbin = bin+3
- #Variable to get the number of samples for the current bin (divide by the resolution dims multiplied together)
+ # Variable to get the number of samples for the current bin (divide by the resolution dims multiplied together)
count = (len(np.where((model_updated.vmax >= bin) & (model_updated.vmax < upperbin))[0]))
-This POD reads in model track data to create 10 degree by 10 degree snapshots around a tropical cyclone (TC) center. The snapshots are made for all storms between
-the years inputted by the user and the column-integrated moist static energy (MSE) budget diabatic terms are calculated for in various ways (azimuthal mean, box-averaged
-nromalized). Then, the snapshots are binned based on the maximum velocity at the time of the snapshot and composited by bin. The bin composites are then compared
-across 5 reanalysis datasets (CFSR, MERRA2, ERA-Interim, ERA-5, and JRA-55), which have already gone through the aforementioned procedure, by being plotted against
-each other in different ways shown below.
+ This POD reads in model track data to create 10 degree by 10 degree snapshots around a tropical cyclone (TC) center.
+ The snapshots are made for all storms between the
+ years inputted by the user, and the column-integrated moist static energy (MSE) budget diabatic terms are calculated
+ in various ways (azimuthal mean, box-averaged normalized). Then, the snapshots are binned based on the maximum
+ velocity at the time of the snapshot and composited by bin. The bin composites are then compared across 5
+ reanalysis datasets (CFSR, MERRA2, ERA-Interim, ERA-5, and JRA-55), which have already gone through the
+ aforementioned procedure, by being plotted against each other in different ways shown below.
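The compositing described above, implemented in Binning_and_compositing.py, groups snapshots into the 3 m/s maximum-wind bins defined by bins = np.arange(0, 66, 3) and averages each bin. A minimal sketch of that binning step, with illustrative array names (not the POD's exact code):

    # Minimal sketch: composite snapshots of a field by maximum-wind bin.
    import numpy as np

    def composite_by_intensity(maxwind, field, bins=np.arange(0, 66, 3)):
        """maxwind: (nsnapshots,); field: (nsnapshots, ny, nx)."""
        which_bin = np.digitize(maxwind, bins) - 1   # bin index per snapshot
        composites = np.full((len(bins),) + field.shape[1:], np.nan)
        for b in range(len(bins)):
            members = field[which_bin == b]
            if members.size:                         # skip empty bins
                composites[b] = np.nanmean(members, axis=0)
        return composites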
$CASENAME")
- settings_template = Template(" | $FIRSTYR - $LASTYR ")
+ case_template = Template(" | $CASENAME")
+ settings_template = Template(" | $startdate - $enddate ")
- for case_name, case_settings in case_dict:
- html_template = case_template.safe_substitute(case_settings)
+ for case_name, case_settings in case_dict.items():
+ html_template = case_template.safe_substitute(case_settings)
f.write(html_template)
html_template = settings_template.safe_substitute(case_settings)
@@ -145,25 +148,24 @@ def generate_html_file_case_loop(f,case_dict):
| POD Settings
"""
f.write(html_template)
-
-
-#============================================================
+# ============================================================
# generate_html_file_case_single (NOT multirun)
-#============================================================
-def generate_html_file_case_single(f):
+# ============================================================
+
+
+def generate_html_file_case_single(f):
"""generate_html_file: write a template file that the framework will replace
Arguments: f (file handle)
-
"""
-
# Write the Annual Cycle figure. Leave replacements to the framework (for now)
# see case_multi for how to substitute eventually
- html_template = " | {{CASENAME}}"
+ html_template = \
+ " | {{CASENAME}}"
f.write(html_template)
- #finalize the figure table, start the case settings table
+ # finalize the figure table, start the case settings table
html_template = """
| {{CASENAME}}"
f.write(html_template)
- html_template = " | {{FIRSTYR}} - {{LASTYR}} "
+ html_template = " | {{startdate}} - {{enddate}} "
f.write(html_template)
# Finish the table
@@ -188,11 +190,12 @@ def generate_html_file_case_single(f):
"""
f.write(html_template)
-
-#============================================================
+# ============================================================
# generate_html_file_footer
-#============================================================
+# ============================================================
+
+
def generate_html_file_footer(f):
"""generate_html_file_footer: write the footer to the
the html template
@@ -200,9 +203,9 @@ def generate_html_file_footer(f):
Arguments: f (file handle)
"""
- #Finish off the website with all the settings from the run
- #The following are replaced by the framework in a call from environment_manager.py
- #It would be great to just dump the dict but it isn't accessible here
+ # Finish off the website with all the settings from the run
+ # The following are replaced by the framework in a call from environment_manager.py
+ # It would be great to just dump the dict but it isn't accessible here
# maybe python codes are called with the pod object
html_template = """
@@ -210,14 +213,14 @@ def generate_html_file_footer(f):
| POD Settings
| SEASON | ANN
- | MDTF_BLOCKING_OBS | "{{MDTF_BLOCKING_OBS}}"
+ | MDTF_BLOCKING_OBS | "{{MDTF_BLOCKING_OBS}}"
| MDTF_BLOCKING_CAM3 | "{{MDTF_BLOCKING_CAM3}}"
| MDTF_BLOCKING_CAM4 | "{{MDTF_BLOCKING_CAM4}}"
| MDTF_BLOCKING_CAM5 | "{{MDTF_BLOCKING_CAM5}}"
| MDTF_BLOCKING_OBS_USE_CASE_YEARS | "{{MDTF_BLOCKING_OBS_USE_CASE_YEARS}}"
- | MDTF_BLOCKING_OBS_CAM5 FIRSTYR - LASTYR | "{{MDTF_BLOCKING_OBS_CAM5_FIRSTYR}} - {{MDTF_BLOCKING_OBS_CAM5_LASTYR}}"
- | MDTF_BLOCKING_OBS_ERA FIRSTYR - LASTYR | "{{MDTF_BLOCKING_OBS_ERA_FIRSTYR }} - {{MDTF_BLOCKING_OBS_ERA_LASTYR}}"
- | MDTF_BLOCKING_OBS_MERRA FIRSTYR - LASTYR | "{{MDTF_BLOCKING_OBS_MERRA_FIRSTYR}} - {{MDTF_BLOCKING_OBS_MERRA_LASTYR}}"
+ | MDTF_BLOCKING_OBS_CAM5 STARTDATE - ENDDATE | "{{MDTF_BLOCKING_OBS_CAM5_STARTDATE}} - {{MDTF_BLOCKING_OBS_CAM5_ENDDATE}}"
+ | MDTF_BLOCKING_OBS_ERA STARTDATE - ENDDATE | "{{MDTF_BLOCKING_OBS_ERA_STARTDATE }} - {{MDTF_BLOCKING_OBS_ERA_ENDDATE}}"
+ | MDTF_BLOCKING_OBS_MERRA STARTDATE - ENDDATE | "{{MDTF_BLOCKING_OBS_MERRA_STARTDATE}} - {{MDTF_BLOCKING_OBS_MERRA_ENDDATE}}"
| MDTF_BLOCKING_READ_DIGESTED | "{{MDTF_BLOCKING_READ_DIGESTED}}"
| MDTF_BLOCKING_WRITE_DIGESTED | "{{MDTF_BLOCKING_WRITE_DIGESTED}}"
@@ -227,7 +230,7 @@ def generate_html_file_footer(f):
| MODEL_DATA_PATH | "{{MODEL_DATA_PATH}}"
| OBS_DATA | "{{OBS_DATA}}"
| POD_HOME | "{{POD_HOME}}"
- | WK_DIR | "{{WK_DIR}}"
+ | WORK_DIR | "{{WORK_DIR}}"
| case_env_file | "{{case_env_file}}"
| zg_var | "{{zg_var}}"
@@ -240,35 +243,36 @@ def generate_html_file_footer(f):
# writing the code into the file
f.write(html_template)
-#============================================================
+# ============================================================
# generate_html_file
-#============================================================
-def generate_html_file(html_page,case_dict=None):
+# ============================================================
+
+
+def generate_html_file(html_page: str, case_dict=None):
"""generate_html_file: write the html file template
with generic variable names, for the correct cases
- Arguments: html_page(string) file name full path
+ Arguments: html_page(string): file name full path
case_dict (nested dict)
"""
- f = open(html_page,"w")
+ f = open(html_page, "w")
generate_html_file_header(f)
- if (os.environ["CASE_N"] == "1"):
+ if os.environ["CASE_N"] == "1":
generate_html_file_case_single(f)
else:
- generate_html_file_case_loop(f,case_dict)
+ generate_html_file_case_loop(f, case_dict)
generate_html_file_footer(f)
-
-
+
# close the file
f.close()
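An equivalent structure using a context manager would close the handle even if one of the writer functions raises; a sketch only (it reuses the module's own helpers and `import os`), not the code as merged:

```python
def generate_html_file(html_page: str, case_dict=None):
    """Sketch: same flow as above, with the file handle managed by a with-block."""
    with open(html_page, "w") as f:
        generate_html_file_header(f)
        if os.environ["CASE_N"] == "1":
            generate_html_file_case_single(f)
        else:
            generate_html_file_case_loop(f, case_dict)
        generate_html_file_footer(f)
```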
+# ============================================================
+# generate_ncl_plots - call a nclPlotFile via subprocess call
+# ============================================================
-#============================================================
-# generate_ncl_plots - call a nclPlotFile via subprocess call
-#============================================================
def generate_ncl_plots(nclPlotFile):
"""generate_plots_call - call a nclPlotFile via subprocess call
@@ -280,11 +284,11 @@ def generate_ncl_plots(nclPlotFile):
try:
pipe = subprocess.Popen(['ncl {0}'.format(nclPlotFile)], shell=True, stdout=subprocess.PIPE)
output = pipe.communicate()[0].decode()
- print('NCL routine {0} \n {1}'.format(nclPlotFile,output))
+ print('NCL routine {0} \n {1}'.format(nclPlotFile, output))
while pipe.poll() is None:
time.sleep(0.5)
except OSError as e:
- print('WARNING',e.errno,e.strerror)
+ print('WARNING', e.errno, e.strerror)
return 0
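Note that `communicate()` already waits for the process to exit, so the `poll()` loop above is redundant; on Python 3.5+ the same call can be written with `subprocess.run`, sketched here as an alternative rather than a required change:

```python
import subprocess

def generate_ncl_plots_alt(ncl_plot_file: str) -> int:
    """Sketch: run an NCL script and echo its stdout, without manual polling."""
    try:
        result = subprocess.run(["ncl", ncl_plot_file], capture_output=True, text=True)
        print("NCL routine {0} \n {1}".format(ncl_plot_file, result.stdout))
    except OSError as e:
        print("WARNING", e.errno, e.strerror)
    return 0
```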
@@ -292,19 +296,21 @@ def generate_ncl_plots(nclPlotFile):
# MAIN
############################################################
-#============================================================
+# ============================================================
# Translate yaml file variables to environment variables for
-# NCL programs to read
-#============================================================
+# NCL programs to read
+# ============================================================
+
+# Check for $WORK_DIR/case_env.yaml, as sign of multiple cases
+
-# Check for $WKDIR/case_env.yaml, as sign of multipe cases
print("blocking_neale.py looking for possible multicase case_env_file")
env_var = "case_env_file"
-if env_var in os.environ:
+if env_var in os.environ:
case_env_file = os.environ.get("case_env_file")
print("blocking_neale.py case_env_file found? ",case_env_file)
- if (os.path.isfile(case_env_file)):
+ if os.path.isfile(case_env_file):
with open(case_env_file, 'r') as stream:
try:
case_info = yaml.safe_load(stream)
@@ -315,47 +321,45 @@ def generate_ncl_plots(nclPlotFile):
icase = 0 # index for cases, needed to save numbered env vars
for case_name, case_settings in case_info.items():
- icase = icase+1
- print("case ",icase,": ",case_name)
+ icase = icase + 1
+ print("case ", icase, ": ", case_name)
for k, v in case_settings.items():
- casei_env_var_name = "CASE"+str(icase)+"_"+str(k)
+ casei_env_var_name = "CASE" + str(icase) + "_" + str(k)
os.environ[casei_env_var_name] = str(v)
- print("setenv ",casei_env_var_name,"\t ",v)
+ print("setenv ", casei_env_var_name, "\t ", v)
os.environ["CASE_N"] = str(icase)
- print("setenv ","CASE_N","\t ",icase)
+ print("setenv ", "CASE_N", "\t ", icase)
else:
print("No multicase case_env_file found so proceeding as single case")
os.environ["CASE_N"] = "1"
-
-#============================================================
+# ============================================================
# Call NCL code here
-#============================================================
+# ============================================================
if not os.path.exists(os.path.join(os.environ['DATADIR'], 'day')):
os.makedirs(os.path.join(os.environ['DATADIR'], 'day'))
print("blocking_neale.py calling blocking.ncl")
generate_ncl_plots(os.environ["POD_HOME"]+"/blocking.ncl")
-
-
-#============================================================
+# ============================================================
# Generate HTML page with correct number of cases
-#============================================================
-#This is the proper place but the framework fails if there isn't something
+# ============================================================
+# This is the proper place but the framework fails if there isn't something
# in the POD_HOME dir, and placing a stub file there ends up overwriting this!
-#html_page = os.environ["WK_DIR"]+"/blocking_neale.html"
+# html_page = os.environ["WORK_DIR"]+"/blocking_neale.html"
html_page = os.environ["POD_HOME"]+"/blocking_neale.html"
-print("blocking_neale.py generating dynamic webpage ",html_page)
+print("blocking_neale.py generating dynamic webpage ", html_page)
-if (os.environ["CASE_N"] == "1"):
+if os.environ["CASE_N"] == "1":
generate_html_file(html_page)
else:
- generate_html_file(html_page,case_info.items())
+ generate_html_file(html_page, case_info)
+# ============================================================
-#============================================================
print("blocking_neale.py finished.")
+sys.exit(0)
diff --git a/diagnostics/blocking_neale/settings.jsonc b/diagnostics/blocking_neale/settings.jsonc
index 1488127f1..835cafe91 100644
--- a/diagnostics/blocking_neale/settings.jsonc
+++ b/diagnostics/blocking_neale/settings.jsonc
@@ -14,10 +14,8 @@
// Human-readable name of the diagnostic. May contain spaces.
"long_name" : "Rich Neale's blocking diagnostic",
-
- // Modeling realm. If your diagnostic uses data from multiple realms, give
- // this as a list.
- "realm" : "atmos",
+ // Data convention expected by the diagnostic: cmip (default), cesm, or gfdl
+ "convention": "cesm",
// Human-readable name of the diagnostic. May contain spaces. This
// is used to describe your diagnostic on the top-level index.html page.
@@ -70,8 +68,16 @@
// "dimensions" attribute for each variable must correspond to a coordinate
// named here.
"dimensions": {
- "lat": {"standard_name": "latitude"},
- "lon": {"standard_name": "longitude"},
+ "lat": {
+ "standard_name": "latitude",
+ "units": "degrees_north",
+ "axis": "Y"
+ },
+ "lon": {
+ "standard_name": "longitude",
+ "units": "degrees_east",
+ "axis": "X"
+ },
"lev": {
"standard_name": "air_pressure",
"units": "hPa",
@@ -91,6 +97,7 @@
"path_variable" : "MODEL_DATA_PATH",
"standard_name" : "geopotential_height",
"units": "m",
+ "realm" : "atmos",
"frequency" : "day",
"dimensions": ["time", "lat", "lon"],
"scalar_coordinates": {"lev" : 500}
diff --git a/diagnostics/convective_transition_diag/convecTransBasic.py b/diagnostics/convective_transition_diag/convecTransBasic.py
index ba0ef29c6..f203a9c3a 100644
--- a/diagnostics/convective_transition_diag/convecTransBasic.py
+++ b/diagnostics/convective_transition_diag/convecTransBasic.py
@@ -3,29 +3,29 @@
# ======================================================================
# convecTransBasic.py
#
-# Convective Transition Basic Statistics
-# as part of functionality provided by
-# Convective Transition Diagnostic Package (convective_transition_diag_v1r3.py)
+# Convective Transition Basic Statistics
+# as part of functionality provided by
+# Convective Transition Diagnostic Package (convective_transition_diag_v1r3.py)
#
-# Version 1 revision 3 13-Nov-2017 Yi-Hung Kuo (UCLA)
-# PI: J. David Neelin (UCLA; neelin@atmos.ucla.edu)
-# Current developer: Yi-Hung Kuo (yhkuo@atmos.ucla.edu)
-# Contributors: K. A. Schiro (UCLA), B. Langenbrunner (UCLA), F. Ahmed (UCLA),
-# C. Martinez (UCLA), C.-C. (Jack) Chen (NCAR)
+# Version 1 revision 3 13-Nov-2017 Yi-Hung Kuo (UCLA)
+# PI: J. David Neelin (UCLA; neelin@atmos.ucla.edu)
+# Current developer: Yi-Hung Kuo (yhkuo@atmos.ucla.edu)
+# Contributors: K. A. Schiro (UCLA), B. Langenbrunner (UCLA), F. Ahmed (UCLA),
+# C. Martinez (UCLA), C.-C. (Jack) Chen (NCAR)
#
-# This file is part of the Convective Transition Diagnostic Package
-# and the MDTF code package. See LICENSE.txt for the license.
+# This file is part of the Convective Transition Diagnostic Package
+# and the MDTF code package. See LICENSE.txt for the license.
#
-# Computes a set of Convective Transition Statistics as in Kuo et al. (2018).
+# Computes a set of Convective Transition Statistics as in Kuo et al. (2018).
#
-# Generates plots of:
+# Generates plots of:
# (1) conditional average precipitation
# (2) conditional probability of precipitation
# (3) probability density function (PDF) of all events
# (4) PDF of precipitating events
# all as a function of column water vapor (CWV) and bulk tropospheric temperature
#
-# Depends on the following scripts:
+# Depends on the following scripts:
# (1) convecTransBasic_usp_calc.py
# (2) convecTransBasic_usp_plot.py
# (3) convecTransBasic_util.py
@@ -33,27 +33,27 @@
# Bulk tropospheric temperature measures used include
# (1) tave: mass-weighted column average temperature
# (2) qsat_int: column-integrated saturation humidity
-# Choose one by setting BULK_TROPOSPHERIC_TEMPERATURE_MEASURE
-# in mdtf.py (or convecTransBasic_usp_calc.py)
-# Here the column is 1000-200 hPa by default
+# Choose one by setting BULK_TROPOSPHERIC_TEMPERATURE_MEASURE
+# in mdtf.py (or convecTransBasic_usp_calc.py)
+# Here the column is 1000-200 hPa by default
#
# tave & qsat_int are not standard model output yet, pre-processing calculates these two
-# and saves them in the model output directory (if there is a permission issue,
-# change PREPROCESSING_OUTPUT_DIR with related changes, or simply force
-# data["SAVE_TAVE_QSAT_INT"]=0, both in convecTransBasic_usp_calc.py)
+# and saves them in the model output directory (if there is a permission issue,
+# change PREPROCESSING_OUTPUT_DIR with related changes, or simply force
+# data["SAVE_TAVE_QSAT_INT"]=0, both in convecTransBasic_usp_calc.py)
#
# Defaults for binning choices, etc. that can be altered by user are in:
-# convecTransBasic_usp_calc.py
+# convecTransBasic_usp_calc.py
#
# Defaults for plotting choices that can be altered by user are in:
-# convecTransBasic_usp_calc_plot.py
+# convecTransBasic_usp_calc_plot.py
#
# Utility functions are defined in convecTransBasic_util.py
#
# To change regions over which binning computations are done, see
-# convecTransBasic_usp_calc.py &
-# generate_region_mask in convecTransBasic_util.py
-# (and change obs_data/convective_transition_diag/region_0.25x0.25_costal2.5degExcluded.mat)
+# convecTransBasic_usp_calc.py &
+# generate_region_mask in convecTransBasic_util.py
+# (and change obs_data/convective_transition_diag/region_0.25x0.25_costal2.5degExcluded.mat)
# ======================================================================
# Import standard Python packages
import os
@@ -77,15 +77,15 @@
print("Load user-specified binning parameters..."),
# Create and read user-specified parameters
-os.system("python "+os.environ["POD_HOME"]+"/"+"convecTransBasic_usp_calc.py")
-with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json") as outfile:
- bin_data=json.load(outfile)
+os.system("python "+ os.environ["POD_HOME"]+ "/" + "convecTransBasic_usp_calc.py")
+with open(os.environ["WORK_DIR"]+"/" + "convecTransBasic_calc_parameters.json") as outfile:
+ bin_data = json.load(outfile)
print("...Loaded!")
print("Load user-specified plotting parameters..."),
-os.system("python "+os.environ["POD_HOME"]+"/"+"convecTransBasic_usp_plot.py")
-with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_plot_parameters.json") as outfile:
- plot_data=json.load(outfile)
+os.system("python " + os.environ["POD_HOME"] + "/" + "convecTransBasic_usp_plot.py")
+with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_plot_parameters.json") as outfile:
+ plot_data = json.load(outfile)
print("...Loaded!")
# ======================================================================
@@ -96,31 +96,32 @@
# if so, skip binning; otherwise, bin data using model output
# (see convecTransBasic_usp_calc.py for where the model output locate)
-if (len(bin_data["bin_output_list"])==0 or bin_data["BIN_ANYWAY"]):
+if len(bin_data["bin_output_list"]) == 0 or bin_data["BIN_ANYWAY"]:
print("Starting binning procedure...")
- if bin_data["PREPROCESS_TA"]==1:
- print(" Atmospheric temperature pre-processing required")
- if bin_data["SAVE_TAVE_QSAT_INT"]==1:
- print(" Pre-processed temperature fields ("\
- +os.environ["tave_var"]+" & "+os.environ["qsat_int_var"]\
- +") will be saved to "+bin_data["PREPROCESSING_OUTPUT_DIR"]+"/")
+ if bin_data["PREPROCESS_TA"] == 1:
+ print(" Atmospheric temperature pre-processing required")
+ if bin_data["SAVE_TAVE_QSAT_INT"] == 1:
+ print(" Pre-processed temperature fields ("
+ + os.environ["tave_var"] + " & " + os.environ["qsat_int_var"]
+ + ") will be saved to " + bin_data["PREPROCESSING_OUTPUT_DIR"] + "/")
# Load & pre-process region mask
- REGION=generate_region_mask(bin_data["REGION_MASK_DIR"]+"/"+bin_data["REGION_MASK_FILENAME"], bin_data["pr_list"][0],bin_data["LAT_VAR"],bin_data["LON_VAR"])
+    REGION = generate_region_mask(bin_data["REGION_MASK_DIR"] + "/" + bin_data["REGION_MASK_FILENAME"],
+ bin_data["pr_list"][0], bin_data["LAT_VAR"], bin_data["LON_VAR"])
# Pre-process temperature (if necessary) & bin & save binned results
- binned_output=convecTransBasic_calc_model(REGION,bin_data["args1"])
+    binned_output = convecTransBasic_calc_model(REGION, bin_data["args1"])
-else: # Binned data file exists & BIN_ANYWAY=False
+else: # Binned data file exists & BIN_ANYWAY=False
print("Binned output detected..."),
binned_output=convecTransBasic_loadAnalyzedData(bin_data["args2"])
print("...Loaded!")
# ======================================================================
# Plot binning results & save the figure in wkdir/MDTF_casename/.../
-convecTransBasic_plot(binned_output,plot_data["plot_params"],plot_data["args3"],plot_data["args4"])
+convecTransBasic_plot(binned_output, plot_data["plot_params"], plot_data["args3"], plot_data["args4"])
print("**************************************************")
print("Convective Transition Basic Statistics (convecTransBasic.py) Executed!")
diff --git a/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py b/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py
index 1da392488..31efcc7e5 100644
--- a/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py
+++ b/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py
@@ -31,73 +31,73 @@
# ======================================================================
# Region mask directory & filename
-REGION_MASK_DIR=os.environ["OBS_DATA"]
-REGION_MASK_FILENAME="region_0.25x0.25_costal2.5degExcluded.mat"
+REGION_MASK_DIR = os.environ["OBS_DATA"]
+REGION_MASK_FILENAME = "region_0.25x0.25_costal2.5degExcluded.mat"
# Number of regions
# Use grids with 1<=region<=NUMBER_OF_REGIONS in the mask
-NUMBER_OF_REGIONS=4 # default: 4
+NUMBER_OF_REGIONS = 4 # default: 4
# Region names
-REGION_STR=["WPac","EPac","Atl","Ind"]
+REGION_STR = ["WPac", "EPac", "Atl", "Ind"]
# ======================================================================
# Directory for saving pre-processed temperature fields
-# tave [K]: Mass-weighted column average temperature
-# qsat_int [mm]: Column-integrated saturation specific humidity
+# tave [K]: Mass-weighted column average temperature
+# qsat_int [mm]: Column-integrated saturation specific humidity
# USER MUST HAVE WRITE PERMISSION
-# If one changes PREPROCESSING_OUTPUT_DIR, one must also modify data["tave_list"]
-# & data["qsat_int_list"] below by replacing MODEL_OUTPUT_DIR with
-# PREPROCESSING_OUTPUT_DIR
-PREPROCESSING_OUTPUT_DIR=os.environ["DATADIR"]
-TAVE_VAR=os.environ["tave_var"]
-QSAT_INT_VAR=os.environ["qsat_int_var"]
+# If one changes PREPROCESSING_OUTPUT_DIR, one must also modify data["tave_list"]
+# & data["qsat_int_list"] below by replacing MODEL_OUTPUT_DIR with
+# PREPROCESSING_OUTPUT_DIR
+PREPROCESSING_OUTPUT_DIR = os.environ["DATADIR"]
+TAVE_VAR = os.environ["tave_var"]
+QSAT_INT_VAR = os.environ["qsat_int_var"]
# Number of time-steps in Temperature-preprocessing
-# Default: 1000 (use smaller numbers for limited memory)
-time_idx_delta=1000
+# Default: 1000 (use smaller numbers for limited memory)
+time_idx_delta = 1000
# Use 1:tave, or 2:qsat_int as Bulk Tropospheric Temperature Measure
-BULK_TROPOSPHERIC_TEMPERATURE_MEASURE=int(os.environ["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"])
+BULK_TROPOSPHERIC_TEMPERATURE_MEASURE = int(os.environ["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"])
# ======================================================================
# Directory & Filename for saving binned results (netCDF4)
# tave or qsat_int will be appended to BIN_OUTPUT_FILENAME
-BIN_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/netCDF"
-BIN_OUTPUT_FILENAME=os.environ["CASENAME"]+".convecTransBasic"
+BIN_OUTPUT_DIR = os.environ["WORK_DIR"] + "/model/netCDF"
+BIN_OUTPUT_FILENAME = os.environ["CASENAME"] + ".convecTransBasic"
# ======================================================================
# Re-do binning even if binned data file detected (default: True)
-BIN_ANYWAY=True
+BIN_ANYWAY = True
# ======================================================================
# Column Water Vapor (CWV in mm) range & bin-width
# CWV bin centers are integral multiples of cwv_bin_width
-CWV_BIN_WIDTH=0.3 # default=0.3 (following satellite retrieval product)
-CWV_RANGE_MAX=90.0 # default=90 (75 for satellite retrieval product)
+CWV_BIN_WIDTH = 0.3 # default=0.3 (following satellite retrieval product)
+CWV_RANGE_MAX = 90.0 # default=90 (75 for satellite retrieval product)
# Mass-weighted Column Average Temperature tave [K] range & bin-width
# with 1K increment and integral bin centers
-T_RANGE_MIN=260.0
-T_RANGE_MAX=280.0
-T_BIN_WIDTH=1.0
+T_RANGE_MIN = 260.0
+T_RANGE_MAX = 280.0
+T_BIN_WIDTH = 1.0
# Column-integrated Saturation Specific Humidity qsat_int [mm] range & bin-width
-# with bin centers = Q_RANGE_MIN + integer*Q_BIN_WIDTH
+# with bin centers = Q_RANGE_MIN + integer*Q_BIN_WIDTH
# Satellite retrieval suggests T_BIN_WIDTH=1
-# is approximately equivalent to Q_BIN_WIDTH=4.8
-Q_RANGE_MIN=16.0
-Q_RANGE_MAX=106.0
-Q_BIN_WIDTH=4.5
+# is approximately equivalent to Q_BIN_WIDTH=4.8
+Q_RANGE_MIN = 16.0
+Q_RANGE_MAX = 106.0
+Q_BIN_WIDTH = 4.5
# Define column [hPa] (default: 1000-200 hPa)
-# One can re-define column by changing p_lev_bottom & p_lev_top,
-# but one must also delete/re-name existing tave & qsat_int files
-# since the default tave & qsat_int filenames do not contain conlumn info
-p_lev_bottom=1000
-p_lev_top=200
+# One can re-define column by changing p_lev_bottom & p_lev_top,
+# but one must also delete/re-name existing tave & qsat_int files
+# since the default tave & qsat_int filenames do not contain conlumn info
+p_lev_bottom = 1000
+p_lev_top = 200
# If model pressure levels are close to p_lev_bottom and/or p_lev_top
-# (within dp-hPa neighborhood), use model level(s) to define column instead
-dp=1.0
+# (within dp-hPa neighborhood), use model level(s) to define column instead
+dp = 1.0
# Threshold value defining precipitating events [mm/hr]
-PRECIP_THRESHOLD=0.25
+PRECIP_THRESHOLD = 0.25
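As a worked example of the bin conventions stated in the comments above (CWV bin centers at integral multiples of the bin width; 1 K temperature bins with integral centers). The construction is assumed from those comments, not copied from the POD's binning code:

```python
import numpy as np

CWV_BIN_WIDTH, CWV_RANGE_MAX = 0.3, 90.0                  # values from the settings above
T_RANGE_MIN, T_RANGE_MAX, T_BIN_WIDTH = 260.0, 280.0, 1.0

cwv_centers = CWV_BIN_WIDTH * np.arange(1, round(CWV_RANGE_MAX / CWV_BIN_WIDTH) + 1)
# -> 0.3, 0.6, ..., 90.0 mm
t_centers = np.arange(T_RANGE_MIN, T_RANGE_MAX + T_BIN_WIDTH, T_BIN_WIDTH)
# -> 260, 261, ..., 280 K
```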
# ======================================================================
# END USER SPECIFIED SECTION
@@ -107,130 +107,132 @@
# DO NOT MODIFY CODE BELOW UNLESS
# YOU KNOW WHAT YOU ARE DOING
# ======================================================================
-data={}
-
-data["MODEL"]=MODEL
-data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR
-data["PREPROCESSING_OUTPUT_DIR"]=PREPROCESSING_OUTPUT_DIR
-
-data["REGION_MASK_DIR"]=REGION_MASK_DIR
-data["REGION_MASK_FILENAME"]=REGION_MASK_FILENAME
-
-data["NUMBER_OF_REGIONS"]=NUMBER_OF_REGIONS
-data["REGION_STR"]=REGION_STR
-
-data["TIME_VAR"]=TIME_VAR
-data["LAT_VAR"]=LAT_VAR
-data["LON_VAR"]=LON_VAR
-data["TAVE_VAR"]=TAVE_VAR
-data["QSAT_INT_VAR"]=QSAT_INT_VAR
-data["PRES_VAR"]=PRES_VAR
-data["time_idx_delta"]=time_idx_delta
-data["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"]=BULK_TROPOSPHERIC_TEMPERATURE_MEASURE
-
-data["BIN_OUTPUT_DIR"]=BIN_OUTPUT_DIR
-data["BIN_OUTPUT_FILENAME"]=BIN_OUTPUT_FILENAME
-
-if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1:
- data["BIN_OUTPUT_FILENAME"]+="_"+TAVE_VAR
- data["TEMP_VAR"]=TAVE_VAR
-elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2:
- data["BIN_OUTPUT_FILENAME"]+="_"+QSAT_INT_VAR
- data["TEMP_VAR"]=QSAT_INT_VAR
-
-data["BIN_ANYWAY"]=BIN_ANYWAY
+data = {}
+
+data["MODEL"] = MODEL
+data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR
+data["PREPROCESSING_OUTPUT_DIR"] = PREPROCESSING_OUTPUT_DIR
+
+data["REGION_MASK_DIR"] = REGION_MASK_DIR
+data["REGION_MASK_FILENAME"] = REGION_MASK_FILENAME
+
+data["NUMBER_OF_REGIONS"] = NUMBER_OF_REGIONS
+data["REGION_STR"] = REGION_STR
+
+data["TIME_VAR"] = TIME_VAR
+data["LAT_VAR"] = LAT_VAR
+data["LON_VAR"] = LON_VAR
+data["TAVE_VAR"] = TAVE_VAR
+data["QSAT_INT_VAR"] = QSAT_INT_VAR
+data["PRES_VAR"] = PRES_VAR
+data["time_idx_delta"] = time_idx_delta
+data["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"] = BULK_TROPOSPHERIC_TEMPERATURE_MEASURE
+
+data["BIN_OUTPUT_DIR"] = BIN_OUTPUT_DIR
+data["BIN_OUTPUT_FILENAME"] = BIN_OUTPUT_FILENAME
+
+if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1:
+ data["BIN_OUTPUT_FILENAME"] += "_" + TAVE_VAR
+ data["TEMP_VAR"] = TAVE_VAR
+elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2:
+ data["BIN_OUTPUT_FILENAME"] += "_" + QSAT_INT_VAR
+ data["TEMP_VAR"] = QSAT_INT_VAR
+
+data["BIN_ANYWAY"] = BIN_ANYWAY
-data["CWV_BIN_WIDTH"]=CWV_BIN_WIDTH
-data["CWV_RANGE_MAX"]=CWV_RANGE_MAX
+data["CWV_BIN_WIDTH"] = CWV_BIN_WIDTH
+data["CWV_RANGE_MAX"] = CWV_RANGE_MAX
-data["T_RANGE_MIN"]=T_RANGE_MIN
-data["T_RANGE_MAX"]=T_RANGE_MAX
-data["T_BIN_WIDTH"]=T_BIN_WIDTH
+data["T_RANGE_MIN"] = T_RANGE_MIN
+data["T_RANGE_MAX"] = T_RANGE_MAX
+data["T_BIN_WIDTH"] = T_BIN_WIDTH
-data["Q_RANGE_MIN"]=Q_RANGE_MIN
-data["Q_RANGE_MAX"]=Q_RANGE_MAX
-data["Q_BIN_WIDTH"]=Q_BIN_WIDTH
+data["Q_RANGE_MIN"] = Q_RANGE_MIN
+data["Q_RANGE_MAX"] = Q_RANGE_MAX
+data["Q_BIN_WIDTH"] = Q_BIN_WIDTH
-data["p_lev_bottom"]=p_lev_bottom
-data["p_lev_top"]=p_lev_top
-data["dp"]=dp
+data["p_lev_bottom"] = p_lev_bottom
+data["p_lev_top"] = p_lev_top
+data["dp"] = dp
-data["PRECIP_THRESHOLD"]=PRECIP_THRESHOLD
+data["PRECIP_THRESHOLD"] = PRECIP_THRESHOLD
# List binned data file (with filename corresponding to casename)
-data["bin_output_list"]=sorted(glob.glob(data["BIN_OUTPUT_DIR"]+"/"+data["BIN_OUTPUT_FILENAME"]+".nc"))
+data["bin_output_list"] = sorted(glob.glob(data["BIN_OUTPUT_DIR"] + "/" + data["BIN_OUTPUT_FILENAME"] + ".nc"))
# List available netCDF files
# Assumes that the corresponding files in each list
-# have the same spatial/temporal coverage/resolution
-pr_list=sorted(glob.glob(os.environ["pr_file"]))
-prw_list=sorted(glob.glob(os.environ["prw_file"]))
-ta_list=sorted(glob.glob(os.environ["ta_file"]))
+# have the same spatial/temporal coverage/resolution
+pr_list = sorted(glob.glob(os.environ["pr_file"]))
+prw_list = sorted(glob.glob(os.environ["prw_file"]))
+ta_list = sorted(glob.glob(os.environ["ta_file"]))
data["pr_list"] = pr_list
data["prw_list"] = prw_list
data["ta_list"] = ta_list
# Check for pre-processed tave & qsat_int data
-data["tave_list"]=sorted(glob.glob(os.environ["tave_file"]))
-data["qsat_int_list"]=sorted(glob.glob(os.environ["qsat_int_file"]))
+data["tave_list"] = sorted(glob.glob(os.environ["tave_file"]))
+data["qsat_int_list"] = sorted(glob.glob(os.environ["qsat_int_file"]))
-if (len(data["tave_list"])==0 or len(data["qsat_int_list"])==0):
- data["PREPROCESS_TA"]=1
+if len(data["tave_list"]) == 0 or len(data["qsat_int_list"]) == 0:
+ data["PREPROCESS_TA"] = 1
else:
- data["PREPROCESS_TA"]=0
+ data["PREPROCESS_TA"] = 0
# Save pre-processed tave & qsat_int or not; default=0 (don't save)
-data["SAVE_TAVE_QSAT_INT"]=int(os.environ["SAVE_TAVE_QSAT_INT"])
-if data["PREPROCESS_TA"]!=data["SAVE_TAVE_QSAT_INT"]:
+data["SAVE_TAVE_QSAT_INT"] = int(os.environ["SAVE_TAVE_QSAT_INT"])
+if data["PREPROCESS_TA"] != data["SAVE_TAVE_QSAT_INT"]:
print("Pre-processing of air temperature (ta) required to compute weighted column averages,")
print(" but the pre-processed results will not be saved as intermediate output.")
print("To save the pre-processed results as NetCDF files for re-use (write permission required),")
print(" go to settings.jsonc, and changes SAVE_TAVE_QSAT_INT to 1.")
# Taking care of function arguments for binning
-data["args1"]=[ \
-BULK_TROPOSPHERIC_TEMPERATURE_MEASURE, \
-CWV_BIN_WIDTH, \
-CWV_RANGE_MAX, \
-T_RANGE_MIN, \
-T_RANGE_MAX, \
-T_BIN_WIDTH, \
-Q_RANGE_MIN, \
-Q_RANGE_MAX, \
-Q_BIN_WIDTH, \
-NUMBER_OF_REGIONS, \
-pr_list, \
-PR_VAR, \
-prw_list, \
-PRW_VAR, \
-data["PREPROCESS_TA"], \
-MODEL_OUTPUT_DIR, \
-data["qsat_int_list"], \
-QSAT_INT_VAR, \
-data["tave_list"], \
-TAVE_VAR, \
-ta_list, \
-TA_VAR, \
-PRES_VAR, \
-MODEL, \
-p_lev_bottom, \
-p_lev_top, \
-dp, \
-time_idx_delta, \
-data["SAVE_TAVE_QSAT_INT"], \
-PREPROCESSING_OUTPUT_DIR, \
-PRECIP_THRESHOLD, \
-data["BIN_OUTPUT_DIR"], \
-data["BIN_OUTPUT_FILENAME"], \
-TIME_VAR, \
-LAT_VAR, \
-LON_VAR ]
-
-data["args2"]=[ \
-data["bin_output_list"],\
-TAVE_VAR,\
-QSAT_INT_VAR,\
-BULK_TROPOSPHERIC_TEMPERATURE_MEASURE ]
-
-with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json", "w") as outfile:
+data["args1"]=[
+ BULK_TROPOSPHERIC_TEMPERATURE_MEASURE,
+ CWV_BIN_WIDTH,
+ CWV_RANGE_MAX,
+ T_RANGE_MIN,
+ T_RANGE_MAX,
+ T_BIN_WIDTH,
+ Q_RANGE_MIN,
+ Q_RANGE_MAX,
+ Q_BIN_WIDTH,
+ NUMBER_OF_REGIONS,
+ pr_list,
+ PR_VAR,
+ prw_list,
+ PRW_VAR,
+ data["PREPROCESS_TA"],
+ MODEL_OUTPUT_DIR,
+ data["qsat_int_list"],
+ QSAT_INT_VAR,
+ data["tave_list"],
+ TAVE_VAR,
+ ta_list,
+ TA_VAR,
+ PRES_VAR,
+ MODEL,
+ p_lev_bottom,
+ p_lev_top,
+ dp,
+ time_idx_delta,
+ data["SAVE_TAVE_QSAT_INT"],
+ PREPROCESSING_OUTPUT_DIR,
+ PRECIP_THRESHOLD,
+ data["BIN_OUTPUT_DIR"],
+ data["BIN_OUTPUT_FILENAME"],
+ TIME_VAR,
+ LAT_VAR,
+ LON_VAR
+]
+
+data["args2"] = [
+ data["bin_output_list"],
+ TAVE_VAR,
+ QSAT_INT_VAR,
+ BULK_TROPOSPHERIC_TEMPERATURE_MEASURE
+]
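A design note: the 35-element positional `args1` list must stay in exact lockstep with the parameter order of `convecTransBasic_calc_model`, which is easy to break. A keyword-based handoff would be less fragile; a sketch only, since it would need matching changes in `convecTransBasic_util.py`, and the keyword names here are hypothetical:

```python
calc_args = {
    "temperature_measure": BULK_TROPOSPHERIC_TEMPERATURE_MEASURE,
    "cwv_bin_width": CWV_BIN_WIDTH,
    "cwv_range_max": CWV_RANGE_MAX,
    "precip_threshold": PRECIP_THRESHOLD,
    # ... and so on for the remaining parameters
}
# binned_output = convecTransBasic_calc_model(REGION, **calc_args)
```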
+
+with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_calc_parameters.json", "w") as outfile:
json.dump(data, outfile)
diff --git a/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py b/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py
index ef38c8790..e59b93ca7 100644
--- a/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py
+++ b/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py
@@ -13,139 +13,139 @@
import os
import glob
-with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json") as outfile:
+with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_calc_parameters.json") as outfile:
bin_data=json.load(outfile)
# ======================================================================
# START USER SPECIFIED SECTION
# ======================================================================
# Don't plot bins with PDF
Eulerian Storm Track Diagnostic
-By filtering atmospheric data temporally, in a manner that removes the diurnal and the greater than weekly variability, one can isolate the synoptic variability (Blackmon et al. 1976). Then, the standard deviation of the filtered data at each latitude and longitude can be interpreted as the climatological baroclinic wave activity, which, for historical reasons, is termed storm tracks (Wallace et al. 1988). The storm tracks give a simple large-scale metric for the skill in the model representation of extratropical cyclones, in terms of location of the storms, their seasonality and their intensity, which correlates very strongly with transient poleward energy transport.
+ By filtering atmospheric data temporally, in a manner that removes the diurnal and the greater than weekly variability,
+ one can isolate the synoptic variability (Blackmon et al. 1976). Then, the standard deviation of the filtered data at
+ each latitude and longitude can be interpreted as the climatological baroclinic wave activity, which,
+ for historical reasons, is termed storm tracks (Wallace et al. 1988). The storm tracks give a simple
+ large-scale metric for the skill in the model representation of extratropical cyclones, in terms of location of
+ the storms, their seasonality and their intensity, which correlates very strongly with transient poleward
+ energy transport.

-To isolate the synoptic timescale, this algorithm uses 24-hour differences of daily-averaged data. Using daily averages removes the diurnal cycle and the 24-hour differencing removes variability beyond 5 days (Wallace et al. 1988). After filtering the data to create anomalies, the variance of the anomalies is calculated across the four seasons for each year. Then the seasonal variances are averaged across all years. For the first year in the sequence, the variance for JF is calculated and treated as the first DJF instance. For the final December in the sequence is not used in the calculation.
+ To isolate the synoptic timescale, this algorithm uses 24-hour differences of daily-averaged data.
+ Using daily averages removes the diurnal cycle and the 24-hour differencing removes variability beyond 5 days
+ (Wallace et al. 1988). After filtering the data to create anomalies, the variance of the anomalies is calculated
+ across the four seasons for each year. Then the seasonal variances are averaged across all years.
+ For the first year in the sequence, the variance for JF is calculated and treated as the first DJF instance.
+ The final December in the sequence is not used in the calculation.
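A minimal numpy sketch of the filter just described: 24-hour differences of daily means, then the seasonal standard deviation (the POD additionally halves the difference, following Booth et al. 2017). Array shapes and the month mask are invented for illustration:

```python
import numpy as np

rng = np.random.default_rng(0)
v850 = rng.standard_normal((365, 90, 180))   # hypothetical daily-mean v850 (time, lat, lon)
months = ((np.arange(365) // 30) % 12) + 1   # crude stand-in for a real calendar

eddies = v850[1:] - v850[:-1]                # X(t+1) - X(t): isolates the synoptic band
djf = np.isin(months[:-1], [12, 1, 2])       # season mask aligned with the differenced series
storm_track = eddies[djf].std(axis=0)        # climatological DJF baroclinic wave activity
```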
Full Documentation and Contact Information
diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track.py b/diagnostics/eulerian_storm_track/eulerian_storm_track.py
index 81650eda8..6d64c730e 100755
--- a/diagnostics/eulerian_storm_track/eulerian_storm_track.py
+++ b/diagnostics/eulerian_storm_track/eulerian_storm_track.py
@@ -1,193 +1,222 @@
# Code created by Jeyavinoth Jeyaratnam, to be implemented in MDTF
# Import standarad Python packages
-import numpy as np
+import numpy as np
from netCDF4 import Dataset
import os
import glob
# Import my code from the current folder
import eulerian_storm_track_util as est
-import plotter # do not need this, just debugging purpose
+import plotter  # do not need this, just debugging purpose
print("****************************************************************************************")
print("Started Exeuction of Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py)!")
print("****************************************************************************************")
# Setting up the necessary variable names
-os.environ['v850_file'] = '*.'+os.environ['v850_var']+'.day.nc'
+os.environ['v850_file'] = '*.' + os.environ['v850_var'] + '.day.nc'
# Model output filename convection
-os.environ['MODEL_OUTPUT_DIR'] = os.environ['DATADIR']+'/day'
+os.environ['MODEL_OUTPUT_DIR'] = os.environ['DATADIR'] + '/day'
missing_file = 0
-if (len(glob.glob(os.environ['MODEL_OUTPUT_DIR']+'/'+os.environ['v850_file']))==0):
-    print('Required V850 file missing!')
-    missing_file = 1
+if len(glob.glob(os.environ['MODEL_OUTPUT_DIR'] + '/' + os.environ['v850_file'])) == 0:
+    print('Required V850 file missing!')
+    missing_file = 1
-if (missing_file == 1):
-    print('MISSING FILES: Eulerian Strom Tracker will NOT be executed!')
+if missing_file == 1:
+    print('MISSING FILES: Eulerian Storm Tracker will NOT be executed!')
else:
-    ##########################################################
-    # Create the necessary directories
-    ##########################################################
-
-    if not os.path.exists(os.environ['WK_DIR']+'/model'):
-        os.makedirs(os.environ['WK_DIR']+'/model')
-    if not os.path.exists(os.environ['WK_DIR']+'/model/netCDF'):
-        os.makedirs(os.environ['WK_DIR']+'/model/netCDF')
-    if not os.path.exists(os.environ['WK_DIR']+'/model/PS'):
-        os.makedirs(os.environ['WK_DIR']+'/model/PS')
-    if not os.path.exists(os.environ['WK_DIR']+'/obs'):
-        os.makedirs(os.environ['WK_DIR']+'/obs')
-    if not os.path.exists(os.environ['WK_DIR']+'/obs/netCDF'):
-        os.makedirs(os.environ['WK_DIR']+'/obs/netCDF')
-    if not os.path.exists(os.environ['WK_DIR']+'/obs/PS'):
-        os.makedirs(os.environ['WK_DIR']+'/obs/PS')
-
-    ##################################################################
-    # Reading in the necessary data, and computing the daily eddies
-    ##################################################################
-
-    netcdf_filename = os.environ['MODEL_OUTPUT_DIR']+'/'+os.environ['CASENAME']+'.'+os.environ['v850_var']+'.day.nc'
-    if (not os.path.exists(netcdf_filename)):
-        print ('Cannot Find File: ', netcdf_filename)
-
-    # temporarily add the lat_var and lon_var
-    # since these values seem to be missing
-    os.environ['lat_var'] = 'lat'
-    os.environ['lon_var'] = 'lon'
-    os.environ['time_var'] = 'time'
-
-    # reading in the model data
-    ncid = Dataset(netcdf_filename, 'r')
-    lat = ncid.variables[os.environ['lat_var']][:]
-    lat.fill_value = np.nan
-    lat = lat.filled()
-    lon = ncid.variables[os.environ['lon_var']][:]
-    lon.fill_value = np.nan
-    lon = lon.filled()
-    time = ncid.variables[os.environ['time_var']][:]
-    time.fill_value = np.nan
-    time = time.filled()
-    v850 = ncid.variables[os.environ['v850_var']][:]
-    v850.fill_value = np.nan
-    v850 = v850.filled()
-    ncid.close()
-
-    # creating the lat and lon in grid format
-    lonGrid, latGrid = np.meshgrid(lon, lat)
-
-    # getting the daily difference X(t+1) - X(t)
-    eddies = est.transient_eddies(v850)
-
-    ##########################################################
-    # Creating the plot for the different seasons
-    ##########################################################
-
-    print('*** Processing Model Data...')
-    model_zonal_means = {}
-    model_zonal_means['lat'] = lat
-
-    season = 'djf'
-    print('*** Processing Season: %s'%(season.upper()))
-    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season)
-    out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper())
-    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max')
-
-    season = 'mam'
-    print('*** Processing Season: %s'%(season.upper()))
-    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season)
-    out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper())
-    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max')
-
-    season = 'jja'
-    print('*** Processing Season: %s'%(season.upper()))
-    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season)
-    out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper())
-    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max')
-
-    season = 'son'
-    print('*** Processing Season: %s'%(season.upper()))
-    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season)
-    out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper())
-    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max')
-
-
-    #### OBS data ###
-    print('*** Processing Observations: ERA-Interim')
-    obs_data_file = os.environ['OBS_DATA'] + '/erai.nc'
-    obs_topo_file = os.environ['OBS_DATA'] + '/erai_topo.nc'
-    obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, erai_zonal_means = est.obs_std_dev(obs_data_file, obs_topo_file)
-
-    obs_max_lim = 6
-
-    print('*** Processing Season: DJF')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'DJF')
-    plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, title='%s (%d to %d)'%('DJF', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: MAM')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'MAM')
-    plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, title='%s (%d to %d)'%('MAM', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: JJA')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'JJA')
-    plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, title='%s (%d to %d)'%('JJA', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: SON')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'SON')
-    plotter.plot(obs_lon, obs_lat, son, out_file=out_file, title='%s (%d to %d)'%('SON', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Observations: ERA-5')
-    obs_data_file = os.environ['OBS_DATA'] + '/era5.nc'
-    obs_topo_file = os.environ['OBS_DATA'] + '/era5_topo.nc'
-    obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, era5_zonal_means = est.obs_std_dev(obs_data_file, obs_topo_file)
-
-    obs_max_lim = 6
-
-    print('*** Processing Season: DJF')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'DJF')
-    plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, title='%s (%d to %d)'%('DJF', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: MAM')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'MAM')
-    plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, title='%s (%d to %d)'%('MAM', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: JJA')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'JJA')
-    plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, title='%s (%d to %d)'%('JJA', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-    print('*** Processing Season: SON')
-    out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'SON')
-    plotter.plot(obs_lon, obs_lat, son, out_file=out_file, title='%s (%d to %d)'%('SON', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max')
-
-
-    ##########################################################
-    #### Plotting Zonal Means for all the different seasons
-    ##########################################################
-    print('*** Plotting Zonal Means Image')
-    out_file = os.environ['WK_DIR']+'/%s.zonal_means.png'%(os.environ['CASENAME'])
-    plotter.plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file)
-
-    ##########################################################
-    # Editting HTML Template for the current CASENAME
-    ##########################################################
-
-    print('*** Editting Templates...')
-    # Copy template html (and delete old html if necessary)
-    if os.path.isfile( os.environ["WK_DIR"]+"/eulerian_storm_track.html" ):
-        os.system("rm -f "+os.environ["WK_DIR"]+"/eulerian_storm_track.html")
-
-    cmd = "cp "+os.environ["POD_HOME"]+"/eulerian_storm_track.html "+os.environ["WK_DIR"]+"/"
-    os.system(cmd)
-    cmd = "cp "+os.environ["POD_HOME"]+"/doc/MDTF_Documentation_eulerian_storm_track.pdf "+os.environ["WK_DIR"]+"/"
-    os.system(cmd)
-
-    # ======================================================================
-    # End of HTML sections
-    # ======================================================================
-
-    print("*****************************************************************************")
-    print("Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py) Executed!")
-    print("*****************************************************************************")
-
+    ##########################################################
+    # Create the necessary directories
+    ##########################################################
+
+    if not os.path.exists(os.environ['WORK_DIR'] + '/model'):
+        os.makedirs(os.environ['WORK_DIR'] + '/model')
+    if not os.path.exists(os.environ['WORK_DIR'] + '/model/netCDF'):
+        os.makedirs(os.environ['WORK_DIR'] + '/model/netCDF')
+    if not os.path.exists(os.environ['WORK_DIR'] + '/model/PS'):
+        os.makedirs(os.environ['WORK_DIR'] + '/model/PS')
+    if not os.path.exists(os.environ['WORK_DIR'] + '/obs'):
+        os.makedirs(os.environ['WORK_DIR'] + '/obs')
+    if not os.path.exists(os.environ['WORK_DIR'] + '/obs/netCDF'):
+        os.makedirs(os.environ['WORK_DIR'] + '/obs/netCDF')
+    if not os.path.exists(os.environ['WORK_DIR'] + '/obs/PS'):
+        os.makedirs(os.environ['WORK_DIR'] + '/obs/PS')
+
+    ##################################################################
+    # Reading in the necessary data, and computing the daily eddies
+    ##################################################################
+
+    netcdf_filename = os.environ['MODEL_OUTPUT_DIR'] + '/' + os.environ['CASENAME'] + '.' + os.environ[
+        'v850_var'] + '.day.nc'
+    if not os.path.exists(netcdf_filename):
+        print('Cannot Find File: ', netcdf_filename)
+
+    # temporarily add the lat_var and lon_var
+    # since these values seem to be missing
+    os.environ['lat_var'] = 'lat'
+    os.environ['lon_var'] = 'lon'
+    os.environ['time_var'] = 'time'
+
+    # reading in the model data
+    ncid = Dataset(netcdf_filename, 'r')
+    lat = ncid.variables[os.environ['lat_var']][:]
+    lat.fill_value = np.nan
+    lat = lat.filled()
+    lon = ncid.variables[os.environ['lon_var']][:]
+    lon.fill_value = np.nan
+    lon = lon.filled()
+    time = ncid.variables[os.environ['time_var']][:]
+    time.fill_value = np.nan
+    time = time.filled()
+    v850 = ncid.variables[os.environ['v850_var']][:]
+    v850.fill_value = np.nan
+    v850 = v850.filled()
+    ncid.close()
+
+    # creating the lat and lon in grid format
+    lonGrid, latGrid = np.meshgrid(lon, lat)
+
+    # getting the daily difference X(t+1) - X(t)
+    eddies = est.transient_eddies(v850)
+
+    ##########################################################
+    # Creating the plot for the different seasons
+    ##########################################################
+
+    print('*** Processing Model Data...')
+    model_zonal_means = {}
+    model_zonal_means['lat'] = lat
+
+    season = 'djf'
+    print('*** Processing Season: %s' % (season.upper()))
+    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time,
+                                                                 season=season)
+    out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper())
+    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file,
+                 title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']),
+                 levels=np.arange(0, 6), extend='max')
+
+    season = 'mam'
+    print('*** Processing Season: %s' % (season.upper()))
+    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time,
+                                                                 season=season)
+    out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper())
+    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file,
+                 title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']),
+                 levels=np.arange(0, 6), extend='max')
+
+    season = 'jja'
+    print('*** Processing Season: %s' % (season.upper()))
+    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time,
+                                                                 season=season)
+    out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper())
+    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file,
+                 title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']),
+                 levels=np.arange(0, 6), extend='max')
+
+    season = 'son'
+    print('*** Processing Season: %s' % (season.upper()))
+    model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time,
+                                                                 season=season)
+    out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper())
+    plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file,
+                 title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']),
+                 levels=np.arange(0, 6), extend='max')
+
+    # OBS data #
+    print('*** Processing Observations: ERA-Interim')
+    obs_data_file = os.environ['OBS_DATA'] + '/erai.nc'
+    obs_topo_file = os.environ['OBS_DATA'] + '/erai_topo.nc'
+    obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, erai_zonal_means = est.obs_std_dev(
+        obs_data_file, obs_topo_file)
+
+    obs_max_lim = 6
+
+    print('*** Processing Season: DJF')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'DJF')
+    plotter.plot(obs_lon, obs_lat, djf, out_file=out_file,
+                 title='%s (%d to %d)' % ('DJF', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: MAM')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'MAM')
+    plotter.plot(obs_lon, obs_lat, mam, out_file=out_file,
+                 title='%s (%d to %d)' % ('MAM', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: JJA')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'JJA')
+    plotter.plot(obs_lon, obs_lat, jja, out_file=out_file,
+                 title='%s (%d to %d)' % ('JJA', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: SON')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'SON')
+    plotter.plot(obs_lon, obs_lat, son, out_file=out_file,
+                 title='%s (%d to %d)' % ('SON', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Observations: ERA-5')
+    obs_data_file = os.environ['OBS_DATA'] + '/era5.nc'
+    obs_topo_file = os.environ['OBS_DATA'] + '/era5_topo.nc'
+    obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, era5_zonal_means = est.obs_std_dev(
+        obs_data_file, obs_topo_file)
+
+    obs_max_lim = 6
+
+    print('*** Processing Season: DJF')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'DJF')
+    plotter.plot(obs_lon, obs_lat, djf, out_file=out_file,
+                 title='%s (%d to %d)' % ('DJF', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: MAM')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'MAM')
+    plotter.plot(obs_lon, obs_lat, mam, out_file=out_file,
+                 title='%s (%d to %d)' % ('MAM', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: JJA')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'JJA')
+    plotter.plot(obs_lon, obs_lat, jja, out_file=out_file,
+                 title='%s (%d to %d)' % ('JJA', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    print('*** Processing Season: SON')
+    out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'SON')
+    plotter.plot(obs_lon, obs_lat, son, out_file=out_file,
+                 title='%s (%d to %d)' % ('SON', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim),
+                 extend='max')
+
+    ##########################################################
+    # Plotting Zonal Means for all the different seasons
+    ##########################################################
+    print('*** Plotting Zonal Means Image')
+    out_file = os.environ['WORK_DIR'] + '/%s.zonal_means.png' % (os.environ['CASENAME'])
+    plotter.plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file)
+
+    ##########################################################
+    # Editing HTML Template for the current CASENAME
+    ##########################################################
+
+    print('*** Editing Templates...')
+    # Copy template html (and delete old html if necessary)
+    if os.path.isfile(os.environ["WORK_DIR"] + "/eulerian_storm_track.html"):
+        os.system("rm -f " + os.environ["WORK_DIR"] + "/eulerian_storm_track.html")
+
+    cmd = "cp " + os.environ["POD_HOME"] + "/eulerian_storm_track.html " + os.environ["WORK_DIR"] + "/"
+    os.system(cmd)
+    cmd = "cp " + os.environ["POD_HOME"] + "/doc/MDTF_Documentation_eulerian_storm_track.pdf " + os.environ[
+        "WORK_DIR"] + "/"
+    os.system(cmd)
+
+    # ======================================================================
+    # End of HTML sections
+    # ======================================================================
+
+    print("*****************************************************************************")
+    print("Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py) Executed!")
+    print("*****************************************************************************")
diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py b/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py
index 098750838..1a6876309 100755
--- a/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py
+++ b/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python
-############# EULERIAN STROM TRACKER ############
-############# Necessary Functions ###############
-###### Created by: Jeyavinoth Jeyaratnam ####
-###### Created Date: 03/29/2019 ####
-###### Last Modified: 01/17/2020 ####
+# EULERIAN STORM TRACKER ############
+# Necessary Functions ###############
+# Created by: Jeyavinoth Jeyaratnam ####
+# Created Date: 03/29/2019 ####
+# Last Modified: 01/17/2020 ####
#################################################
# Importing standard libraries
@@ -16,149 +16,165 @@
1) Average 6hrly to daily
2) take x(t+1) - x(t)
3) for each year, for season, get std_dev
- 4) avergae std_dev for all years
+ 4) average std_dev for all years
'''
-def six_hrly_to_daily(data, start_year, time):
-    '''
-    Data has to be provided as six hourly timesteps, in a numpy array format (time x lon x lat), lon and lat can be changed, but keep track of it
-    the time variable has to be given in six hourly increments, since the start_year [0, 6, 12, 18, 24, 30, 36, 42, 48]
-    where start_year is the starting year of the given data
-    Output:
-    numpy array in the format time x lon x lat (lon, lat depends on your input)
-    output time dimension size will be the number of days provided in the time array
-    '''
-    # convert time to numpy array
-    time = np.asarray(time)
+def six_hrly_to_daily(data, time):
+    """
+    Data has to be provided as six hourly timesteps, in a numpy array format (time x lon x lat), lon and lat can be
+    changed, but keep track of it
+    the time variable has to be given in six hourly increments, since the start_year
+    [0, 6, 12, 18, 24, 30, 36, 42, 48]
+    where start_year is the starting year of the given data
-    # check if time array and data time dimension is the same
-    if (len(time) != data.shape[0]):
-        raise Exception ("Time dimensions don't match!")
+    Output:
+    numpy array in the format time x lon x lat (lon, lat depends on your input)
+    output time dimension size will be the number of days provided in the time array
+    """
+    # convert time to numpy array
+    time = np.asarray(time)
-    # converting six hrly timesteps into the days
-    time_in_days = (time//24) + 1
-
-    min_time = min(time_in_days)
-    max_time = max(time_in_days)
-    time_range = range(min_time, max_time+1)
+    # check if time array and data time dimension is the same
+    if len(time) != data.shape[0]:
+        raise Exception("Time dimensions don't match!")
-    out_time = np.empty((len(time_range),))*np.nan
-    out_data = np.empty((len(time_range), data.shape[1], data.shape[2]))*np.nan
+    # converting six hrly timesteps into the days
+    time_in_days = (time//24) + 1
-    # looping through the days and creating the output array
-    for ind, day in enumerate(time_range):
-        out_data[ind, :, :] = np.nansum(data[time_in_days == day, :, :], axis=0)
-        out_time[ind] = day
+    min_time = min(time_in_days)
+    max_time = max(time_in_days)
+    time_range = range(min_time, max_time+1)
+
+    out_time = np.empty((len(time_range),))*np.nan
+    out_data = np.empty((len(time_range), data.shape[1], data.shape[2]))*np.nan
+
+    # looping through the days and creating the output array
+    for ind, day in enumerate(time_range):
+        out_data[ind, :, :] = np.nansum(data[time_in_days == day, :, :], axis=0)
+        out_time[ind] = day
+
+    return out_data, out_time
-    return out_data, out_time

def daily_diff(daily_data):
-    '''
-    Data has to be provided as daily_data
-    it will compute the difference between the current day and the previous day
-    i.e. X(t+1) - X(t), nans for the last index
-    '''
-    # pad the right of the array with nan values along the first dimension
-    # then extract the values from the 2nd column (index = 1) to the end
-    # this will give us a shifted array of daily data, i.e. X(t+1)
+    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
-    return daily_data_shift - daily_data # X(t+1) - X(t), with nan values for the last time dimension
+    return daily_data_shift - daily_data  # X(t+1) - X(t), with nan values for the last time dimension

def std_dev(data, time_ind):
-    '''
-    Given data input in the format (time, lat, lon)
-    we will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension
-    '''
-    out_std_dev = np.empty((data.shape[1], data.shape[2]))*np.nan
-
-    # check if any value is true for the selected time, if so then return nan values, else compute standard deviation
-    if np.all(np.invert(time_ind)):
-        print ('No time index selected!')
-        return (out_std_dev)
-    else:
-        return np.nanstd(data[time_ind, :, :], axis=0)
-
-def get_time_ind(start_year, time, season='djf'):
-    ''' Get the time index for the given season '''
-
-    # convert time as numpy array
-    time = np.asarray(time)
-
-    # getting the datetime values for the time index
-    dates_month=[]
-    dates_year=[]
-    for i_time in time:
-        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1)
-        dates_month.append(temp_time.month)
-        dates_year.append(temp_time.year)
-
-    dates_month = np.asarray(dates_month)
-
-    # getting the time index
-    if (season == ''):
-        raise Exception('Set which season you want to extract!')
-    elif (season == 'djf'):
-        time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
-    elif (season == 'mam'):
-        time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
-    elif (season == 'jja'):
-        time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
-    elif (season == 'son'):
-        time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
-
-    return time_ind
-
-def old_std_dev(data, start_year, time, time_period='yearly', season=''):
-    '''
-    Data input has to be daily in the format (time, lat, lon)
-    start_year has to be the start year of the given array
-    if an incomplete data array along the time dimension is provided, then you have to specify the time variable
-    time vaiable has to be specified in days, since start_year [1,2,3,4,5,6,7], default=finds the time starting from day 1
-    time_period includes 'yearly', 'seasonally', 'all', default='all' means avarage of all years
-    if 'byseason' then have to set season variable to be: djf', 'mam', 'jja', 'son'
-
-    Output:
-    returns standard_deviation for the given time_period, and the time array that corresponds to the std_dev output
-    out_time is zero for time_period='all'
-    '''
-    # convert time as numpy array
-    time = np.asarray(time)
-
-    # getting the datetime values for the time index
-    dates_month=[]
-    dates_year=[]
-    for i_time in time:
-        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1)
-        dates_month.append(temp_time.month)
-        dates_year.append(temp_time.year)
-
-    uni_year = sorted(set(dates_year))
-    dates_month = np.asarray(dates_month)
-    dates_year = np.asarray(dates_year)
-
-    # getting the time_ind
-    if (time_period == 'all'):
-        return np.nanstd(data, axis=0), 0
-    else:
+    """
+    Given data input in the format (time, lat, lon)
+    we will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the
+    size of the time dimension
+    """
+    out_std_dev = np.empty((data.shape[1], data.shape[2]))*np.nan
+
+    # check if any value is true for the selected time, if so then return nan values, else compute standard deviation
+    if np.all(np.invert(time_ind)):
+        print('No time index selected!')
+        return out_std_dev
+    else:
+        return np.nanstd(data[time_ind, :, :], axis=0)
+
+
+def get_time_ind(start_year, time, season: str = 'djf'):
+    """
+    Args:
+        start_year:
+        time:
+        season:
+
+    Returns:
+        The time index for the given season
+    """
+
+    # convert time as numpy array
+    time = np.asarray(time)
+
+    # getting the datetime values for the time index
+    dates_month = []
+    dates_year = []
+    for i_time in time:
+        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1)
+        dates_month.append(temp_time.month)
+        dates_year.append(temp_time.year)
+
+    dates_month = np.asarray(dates_month)
+
+    # getting the time index
-    if (time_period == 'yearly'):
-        time_ind = (dates_month > 0)
-    elif (time_period == 'seasonally'):
-        if (season == ''):
+    if season == '':
        raise Exception('Set which season you want to extract!')
-        elif (season == 'djf'):
+    elif season == 'djf':
        time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
-        elif (season == 'mam'):
+    elif season == 'mam':
        time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
-        elif (season == 'jja'):
+    elif season == 'jja':
        time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
-        elif (season == 'son'):
+    elif season == 'son':
        time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
-    else:
-        raise Exception('Error in the time_period set!')
+
+    return time_ind
+
+
+def old_std_dev(data, start_year, time, time_period: str = 'yearly', season: str = ''):
+    """
+    Data input has to be daily in the format (time, lat, lon)
+    start_year has to be the start year of the given array
+    if an incomplete data array along the time dimension is provided, then you have to specify the time variable
+    time variable has to be specified in days, since start_year [1,2,3,4,5,6,7], default=finds the time starting
+    from day 1
+    time_period includes 'yearly', 'seasonally', 'all', default='all' means average of all years
+    if 'byseason' then have to set season variable to be: 'djf', 'mam', 'jja', 'son'
+
+    Output:
+    returns standard_deviation for the given time_period, and the time array that corresponds to the std_dev output
+    out_time is zero for time_period='all'
+    """
+    # convert time as numpy array
+    time = np.asarray(time)
+
+    # getting the datetime values for the time index
+    dates_month = []
+    dates_year = []
+    for i_time in time:
+        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1)
+        dates_month.append(temp_time.month)
+        dates_year.append(temp_time.year)
+
+    uni_year = sorted(set(dates_year))
+    dates_month = np.asarray(dates_month)
+    dates_year = np.asarray(dates_year)
+
+    # getting the time_ind
+    if time_period == 'all':
+        return np.nanstd(data, axis=0), 0
+    else:
+        # getting the time index
+        if time_period == 'yearly':
+            time_ind = (dates_month > 0)
+        elif time_period == 'seasonally':
+            if season == '':
+                raise Exception('Set which season you want to extract!')
+            elif season == 'djf':
+                time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
+            elif season == 'mam':
+                time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
+            elif season == 'jja':
+                time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
+            elif season == 'son':
+                time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
+        else:
+            raise Exception('Error in the time_period set!')
        # initialize output array
        out_time = np.empty((len(uni_year),))*np.nan
@@ -167,23 +183,21 @@ def old_std_dev(data, start_year, time, time_period='yearly', season=''):
        # for each year we have to get the std_dev data
        for out_ind, year in enumerate(uni_year):
-        # setting the time array output
-        out_time[out_ind] = year
+            # setting the time array output
+            out_time[out_ind] = year
-        # getting the matching index for the each unique year
-        year_ind = (dates_year == year)
+            # getting the matching index for each unique year
+            year_ind = (dates_year == year)
-        # overlapping with the season index, or all if time_period is yearly
-        final_ind = year_ind & time_ind
+            # overlapping with the season index, or all if time_period is yearly
+            final_ind = year_ind & time_ind
-        # check if any value is true for the selected time, if so then continue, else compute standard deviation
-        if np.all(np.invert(final_ind)):
-            print ('Debug: Nothing found!')
-            breakpoint()
-            continue
-        else:
-            out_std_dev[out_ind, :, :] = np.nanstd(data[final_ind, :, :], axis=0)
+            # check if any value is true for the selected time, if so then continue, else compute standard deviation
+            if np.all(np.invert(final_ind)):
+                print('Debug: Nothing found!')
+                breakpoint()
+                continue
+            else:
+                out_std_dev[out_ind, :, :] = np.nanstd(data[final_ind, :, :], axis=0)
    return out_std_dev, out_time
-
-
diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py b/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
index 701033e66..ec11d2755 100755
--- a/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
+++ b/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python
-############# EULERIAN STROM TRACKER ############
-###### Created by: Jeyavinoth Jeyaratnam ####
-###### Created Date: 03/29/2019 ####
-###### Last Modified: 05/09/2019 ####
-#################################################
+# EULERIAN STORM TRACKER ############
+# Created by: Jeyavinoth Jeyaratnam ####
+# Created Date: 03/29/2019 ####
+# Last Modified: 05/09/2019 ####
+############################################
# Importing standard libraries
import numpy as np
@@ -13,157 +13,164 @@
from netCDF4 import Dataset
import warnings
+
def transient_eddies(daily_data):
-    '''
-    Data has to be provided as daily_data
-    it will compute the difference between the current day and the previous day
-    i.e. X(t+1) - X(t), nans for the last index
-    in Booth et al., 2017, vprime = (x(t+1) - x(t))/2.
-    '''
-    # pad the right of the array with nan values along the first dimension
-    # then extract the values from the 2nd column (index = 1) to the end
-    # this will give us a shifted array of daily data, i.e. X(t+1)
-    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
-
-    return (daily_data_shift - daily_data)/2. # X(t+1) - X(t), with nan values for the last time dimension
+    """
+    Data has to be provided as daily_data
+    it will compute the difference between the current day and the previous day
+    i.e. X(t+1) - X(t), nans for the last index
+    in Booth et al., 2017, vprime = (x(t+1) - x(t))/2.
+    """
+    # pad the right of the array with nan values along the first dimension
+    # then extract the values from the 2nd column (index = 1) to the end
+    # this will give us a shifted array of daily data, i.e. X(t+1)
+    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
+
+    return (daily_data_shift - daily_data)/2.
# X(t+1) - X(t), with nan values for the last time dimension + def model_std_dev(data, start_year, time, season='djf'): - ''' - Data input should be in the format (time, lat, lon) - We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension - ''' - # convert time as numpy array - time = np.asarray(time) - - # getting the datetime values for the time index - dates_month=[] - dates_year=[] - for i_time in time: - temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1) - dates_month.append(temp_time.month) - dates_year.append(temp_time.year) - - dates_month = np.asarray(dates_month) - dates_year = np.asarray(dates_year) - - eddy_year = [] - for i_year in range(int(os.environ['FIRSTYR']), int(os.environ['LASTYR'])+1): - if (season == 'djf'): - time_ind = ((dates_year == i_year) & (dates_month == 1)) | ((dates_year == i_year) & (dates_month == 2)) | ((dates_year == i_year-1) & (dates_month == 12)) - elif (season == 'mam'): - time_ind = ((dates_year == i_year) & (dates_month == 3)) | ((dates_year == i_year) & (dates_month == 4)) | ((dates_year == i_year) & (dates_month == 5)) - elif (season == 'jja'): - time_ind = ((dates_year == i_year) & (dates_month == 6)) | ((dates_year == i_year) & (dates_month == 7)) | ((dates_year == i_year) & (dates_month == 8)) - elif (season == 'son'): - time_ind = ((dates_year == i_year) & (dates_month == 9)) | ((dates_year == i_year) & (dates_month == 10)) | ((dates_year == i_year) & (dates_month == 11)) + """ + Data input should be in the format (time, lat, lon) + We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size + of the time dimension + """ + # convert time as numpy array + time = np.asarray(time) + + # getting the datetime values for the time index + dates_month = [] + dates_year = [] + for i_time in time: + temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1) + dates_month.append(temp_time.month) + dates_year.append(temp_time.year) + + dates_month = np.asarray(dates_month) + dates_year = np.asarray(dates_year) + + eddy_year = [] + for i_year in range(int(os.environ['startdate']), int(os.environ['enddate']) + 1): + if season == 'djf': + time_ind = (((dates_year == i_year) & (dates_month == 1)) | ((dates_year == i_year) & (dates_month == 2)) + | ((dates_year == i_year-1) & (dates_month == 12))) + elif season == 'mam': + time_ind = (((dates_year == i_year) & (dates_month == 3)) | ((dates_year == i_year) & (dates_month == 4)) + | ((dates_year == i_year) & (dates_month == 5))) + elif season == 'jja': + time_ind = (((dates_year == i_year) & (dates_month == 6)) | ((dates_year == i_year) & (dates_month == 7)) + | ((dates_year == i_year) & (dates_month == 8))) + elif season == 'son': + time_ind = (((dates_year == i_year) & (dates_month == 9)) | ((dates_year == i_year) & (dates_month == 10)) + | ((dates_year == i_year) & (dates_month == 11))) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + eddy_season_mean = np.sqrt(np.nanmean(data[time_ind, :, :] ** 2, axis=0)) + eddy_year.append(eddy_season_mean) with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - eddy_season_mean = np.sqrt(np.nanmean(data[time_ind, :, :] ** 2, axis=0)) - eddy_year.append(eddy_season_mean) + warnings.simplefilter("ignore", category=RuntimeWarning) + eddy_year = np.asarray(eddy_year) + out_std_dev = np.nanmean(eddy_year, axis=0) + zonal_mean = 
np.nanmean(out_std_dev, axis=1) + zonal_mean[zonal_mean == 0] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - eddy_year = np.asarray(eddy_year) - out_std_dev = np.nanmean(eddy_year, axis=0) - zonal_mean = np.nanmean(out_std_dev, axis=1) - zonal_mean[zonal_mean == 0] = np.nan + return out_std_dev, zonal_mean - return out_std_dev, zonal_mean def obs_std_dev(obs_data_file, obs_topo_file): - ''' - Data input should be in the format (time, lat, lon) - We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension - ''' - - nc = Dataset(obs_data_file, 'r') - nc.set_auto_mask(False) - - in_lat = nc.variables['lat'][:] - in_lon = nc.variables['lon'][:] - in_time = nc.variables['time'][:] - - in_jf = nc.variables['jf_sq_eddy'][:] - in_mam = nc.variables['mam_sq_eddy'][:] - in_jja = nc.variables['jja_sq_eddy'][:] - in_son = nc.variables['son_sq_eddy'][:] - in_dec = nc.variables['dec_sq_eddy'][:] - - nc.close() - - # read in the topography information to filter before computing the zonal mean - nc = Dataset(obs_topo_file, 'r') - in_topo = nc.variables['topo'][:] - nc.close() - - topo_cond = (in_topo > 1000) - - djf_year = [] - mam_year = [] - jja_year = [] - son_year = [] - - start_year = int(os.environ['FIRSTYR']) - end_year = int(os.environ['LASTYR']) - - start_year = max([start_year, min(in_time)]) - end_year = min([end_year, max(in_time)]) - - for i_year in range(start_year, end_year+1): - - if not ((i_year == start_year)): - i_djf = np.squeeze(in_dec[in_time == i_year-1, :, :, :] + in_jf[in_time == i_year, :, :, :]) - i_djf = np.sqrt(i_djf[0, :, :]/i_djf[1, :, :]) - djf_year.append(i_djf) - - i_mam = np.squeeze(in_mam[in_time == i_year, :, :, :]) - i_mam = np.sqrt(i_mam[0, :, :]/i_mam[1, :, :]) - mam_year.append(i_mam) - - i_jja = np.squeeze(in_jja[in_time == i_year, :, :, :]) - i_jja = np.sqrt(i_jja[0, :, :]/i_jja[1, :, :]) - jja_year.append(i_jja) - - i_son = np.squeeze(in_son[in_time == i_year, :, :, :]) - i_son = np.sqrt(i_son[0, :, :]/i_son[1, :, :]) - son_year.append(i_son) - - djf_year = np.asarray(djf_year) - djf = np.nanmean(djf_year, axis=0) - djf[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_djf = np.nanmean(djf, axis=1) - zonal_djf[zonal_djf == 0] = np.nan + """ + Data input should be in the format (time, lat, lon) + We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array + of the size of the time dimension + """ + + nc = Dataset(obs_data_file, 'r') + nc.set_auto_mask(False) + + in_lat = nc.variables['lat'][:] + in_lon = nc.variables['lon'][:] + in_time = nc.variables['time'][:] + + in_jf = nc.variables['jf_sq_eddy'][:] + in_mam = nc.variables['mam_sq_eddy'][:] + in_jja = nc.variables['jja_sq_eddy'][:] + in_son = nc.variables['son_sq_eddy'][:] + in_dec = nc.variables['dec_sq_eddy'][:] + + nc.close() + + # read in the topography information to filter before computing the zonal mean + nc = Dataset(obs_topo_file, 'r') + in_topo = nc.variables['topo'][:] + nc.close() + + topo_cond = (in_topo > 1000) + + djf_year = [] + mam_year = [] + jja_year = [] + son_year = [] + + start_year = int(os.environ['startdate']) + end_year = int(os.environ['enddate']) + + start_year = max([start_year, min(in_time)]) + end_year = min([end_year, max(in_time)]) + + for i_year in range(start_year, end_year+1): + if not i_year == start_year: + i_djf = np.squeeze(in_dec[in_time == 
i_year-1, :, :, :] + in_jf[in_time == i_year, :, :, :]) + i_djf = np.sqrt(i_djf[0, :, :]/i_djf[1, :, :]) + djf_year.append(i_djf) + + i_mam = np.squeeze(in_mam[in_time == i_year, :, :, :]) + i_mam = np.sqrt(i_mam[0, :, :]/i_mam[1, :, :]) + mam_year.append(i_mam) + + i_jja = np.squeeze(in_jja[in_time == i_year, :, :, :]) + i_jja = np.sqrt(i_jja[0, :, :]/i_jja[1, :, :]) + jja_year.append(i_jja) + + i_son = np.squeeze(in_son[in_time == i_year, :, :, :]) + i_son = np.sqrt(i_son[0, :, :]/i_son[1, :, :]) + son_year.append(i_son) + + djf_year = np.asarray(djf_year) + djf = np.nanmean(djf_year, axis=0) + djf[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_djf = np.nanmean(djf, axis=1) + zonal_djf[zonal_djf == 0] = np.nan - mam_year = np.asarray(mam_year) - mam = np.nanmean(mam_year, axis=0) - mam[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_mam = np.nanmean(mam, axis=1) - zonal_mam[zonal_mam == 0] = np.nan + mam_year = np.asarray(mam_year) + mam = np.nanmean(mam_year, axis=0) + mam[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_mam = np.nanmean(mam, axis=1) + zonal_mam[zonal_mam == 0] = np.nan - jja_year = np.asarray(jja_year) - jja = np.nanmean(jja_year, axis=0) - jja[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_jja = np.nanmean(jja, axis=1) - zonal_jja[zonal_jja == 0] = np.nan + jja_year = np.asarray(jja_year) + jja = np.nanmean(jja_year, axis=0) + jja[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_jja = np.nanmean(jja, axis=1) + zonal_jja[zonal_jja == 0] = np.nan - son_year = np.asarray(son_year) - son = np.nanmean(son_year, axis=0) - son[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_son = np.nanmean(son, axis=1) - zonal_son[zonal_son == 0] = np.nan + son_year = np.asarray(son_year) + son = np.nanmean(son_year, axis=0) + son[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_son = np.nanmean(son, axis=1) + zonal_son[zonal_son == 0] = np.nan - lonGrid, latGrid = np.meshgrid(in_lon, in_lat) - - zonal_means = {'djf': zonal_djf, 'jja': zonal_jja, 'son': zonal_son, 'mam': zonal_mam, 'lat': in_lat} + lonGrid, latGrid = np.meshgrid(in_lon, in_lat) - return latGrid, lonGrid, djf, mam, jja, son, start_year, end_year, zonal_means + zonal_means = {'djf': zonal_djf, 'jja': zonal_jja, 'son': zonal_son, 'mam': zonal_mam, 'lat': in_lat} + return latGrid, lonGrid, djf, mam, jja, son, start_year, end_year, zonal_means diff --git a/diagnostics/eulerian_storm_track/plotter.py b/diagnostics/eulerian_storm_track/plotter.py index 294fe9ad4..3d2e9f440 100755 --- a/diagnostics/eulerian_storm_track/plotter.py +++ b/diagnostics/eulerian_storm_track/plotter.py @@ -6,90 +6,90 @@ from cartopy.util import add_cyclic_point import numpy as np -def plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file=''): - - plt.close('all') - - plt.figure(figsize=(8,12)) - plt.subplot(2,2,1) - plt.plot(model_zonal_means['djf'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['djf'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - 
plt.plot(era5_zonal_means['djf'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('DJF') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - - plt.subplot(2,2,2) - plt.plot(model_zonal_means['jja'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['jja'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['jja'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('JJA') - plt.legend(loc=0) - plt.ylim(-80, 80) - - plt.subplot(2,2,3) - plt.plot(model_zonal_means['mam'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['mam'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['mam'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('MAM') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - plt.subplot(2,2,4) - plt.plot(model_zonal_means['son'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['son'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['son'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('SON') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - plt.tight_layout() - if (len(out_file) > 0): - if (out_file.endswith('.ps')): - plt.savefig(out_file, format='eps', dpi=300.) - plt.close('all') - elif (out_file.endswith('.png')): - plt.savefig(out_file, format='png', dpi=300.) - plt.close('all') - - -def plot(lonGrid, latGrid, data, show=False, out_file='', title='', **kwargs): - - plt.close('all') - - plt.figure() - - # adding cyclic point - # provided the values are given as lat x lon - lons = lonGrid[0,:] - lats = latGrid[:,0] - - new_data, new_lons = add_cyclic_point(data, coord=lons) - new_lonGrid, new_latGrid = np.meshgrid(new_lons, lats) - - ax = plt.axes(projection=cartopy.crs.PlateCarree()) - ax.coastlines() - # getting rid of the line due to lack of continuity - _ = plt.contourf(new_lonGrid, new_latGrid, new_data, cmap='jet', **kwargs) - cb = plt.colorbar(ax=ax, shrink=0.5) - cb.ax.set_ylabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - if (len(title) > 0): - plt.title(title) - - if (show): - plt.show() - - if (len(out_file) > 0): - if (out_file.endswith('.ps')): - plt.savefig(out_file, format='eps', dpi=300.) - plt.close('all') - elif (out_file.endswith('.png')): - plt.savefig(out_file, format='png', dpi=300.) 
- plt.close('all') +def plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file: str = ''): + + plt.close('all') + + plt.figure(figsize=(8,12)) + plt.subplot(2,2,1) + plt.plot(model_zonal_means['djf'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['djf'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['djf'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('DJF') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + + plt.subplot(2,2,2) + plt.plot(model_zonal_means['jja'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['jja'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['jja'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('JJA') + plt.legend(loc=0) + plt.ylim(-80, 80) + + plt.subplot(2, 2, 3) + plt.plot(model_zonal_means['mam'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['mam'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['mam'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('MAM') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + plt.subplot(2,2,4) + plt.plot(model_zonal_means['son'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['son'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['son'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('SON') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + plt.tight_layout() + if len(out_file) > 0: + if out_file.endswith('.ps'): + plt.savefig(out_file, format='eps', dpi=300.) + plt.close('all') + elif out_file.endswith('.png'): + plt.savefig(out_file, format='png', dpi=300.) + plt.close('all') + + +def plot(lonGrid, latGrid, data, show: bool = False, out_file: str = '', title: str ='', **kwargs): + + plt.close('all') + + plt.figure() + + # adding cyclic point + # provided the values are given as lat x lon + lons = lonGrid[0,:] + lats = latGrid[:,0] + + new_data, new_lons = add_cyclic_point(data, coord=lons) + new_lonGrid, new_latGrid = np.meshgrid(new_lons, lats) + + ax = plt.axes(projection=cartopy.crs.PlateCarree()) + ax.coastlines() + # getting rid of the line due to lack of continuity + _ = plt.contourf(new_lonGrid, new_latGrid, new_data, cmap='jet', **kwargs) + cb = plt.colorbar(ax=ax, shrink=0.5) + cb.ax.set_ylabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + if len(title) > 0: + plt.title(title) + + if show: + plt.show() + + if len(out_file) > 0: + if out_file.endswith('.ps'): + plt.savefig(out_file, format='eps', dpi=300.) + plt.close('all') + elif out_file.endswith('.png'): + plt.savefig(out_file, format='png', dpi=300.) 
+ plt.close('all') diff --git a/diagnostics/eulerian_storm_track/settings.jsonc b/diagnostics/eulerian_storm_track/settings.jsonc index 75739cd65..a443b9964 100755 --- a/diagnostics/eulerian_storm_track/settings.jsonc +++ b/diagnostics/eulerian_storm_track/settings.jsonc @@ -2,7 +2,7 @@ "settings" : { "driver" : "eulerian_storm_track.py", "long_name" : "Eulerian Storm Track", - "realm" : "atmos", + "convention": "cmip", "description" : "Eulerian Storm Track", "pod_env_vars" : { "lat_var": "lat", @@ -13,8 +13,16 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"}, "lev": { "standard_name": "air_pressure", @@ -29,6 +37,7 @@ "varlist" : { "v850": { "standard_name" : "northward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "use_exact_name": true, diff --git a/diagnostics/example/example.html b/diagnostics/example/example.html index 9822edece..050ca8838 100644 --- a/diagnostics/example/example.html +++ b/diagnostics/example/example.html @@ -28,7 +28,7 @@ Example diagnostic: time-averaged near-surface temperature
plot -->
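
For orientation, here is a minimal sketch of how the refactored storm-track utilities above compose, assuming transient_eddies, get_time_ind, and std_dev are importable from the POD's utility module; the array shapes and start year are illustrative only:

    import numpy as np
    import eulerian_storm_track_util as util  # assumed import path

    # stand-in for one year of daily v850 data with shape (time, lat, lon)
    v850 = np.random.randn(365, 90, 180)

    # eddy component: (X(t+1) - X(t))/2, NaN at the final time step
    vprime = util.transient_eddies(v850)

    # times are days since Jan 1 of the start year, counted from 1
    time_days = np.arange(1, v850.shape[0] + 1)

    # boolean mask selecting DJF days, then the DJF std-dev map (lat, lon)
    djf_ind = util.get_time_ind(1980, time_days, season='djf')
    djf_std = util.std_dev(vprime, djf_ind)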
diff --git a/diagnostics/example_multicase/example_multicase.py b/diagnostics/example_multicase/example_multicase.py
index 1f9a733d3..b66ada428 100755
--- a/diagnostics/example_multicase/example_multicase.py
+++ b/diagnostics/example_multicase/example_multicase.py
@@ -34,11 +34,13 @@
#
# Required programming language and libraries
#
-# * Python >= 3.7
+# * Python >= 3.10
# * xarray
# * matplotlib
+# * intake
# * yaml
# * sys
+# * os
# * numpy
#
# Required model output variables
@@ -58,52 +60,46 @@
matplotlib.use("Agg") # non-X windows backend
-import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
-import yaml
+import intake
import sys
+import yaml
# Part 1: Read in the model data
# ------------------------------
-
-# Receive a dictionary of case information from the framework. For now, we will
-# "fake" a dictionary now with information we are getting from the single case
-# POD that is processed by the framework
+# Debugging: remove the following line in the final PR
+# os.environ["WORK_DIR"] = "/Users/jess/mdtf/wkdir/MDTF_output/example_multicase"
+work_dir = os.environ["WORK_DIR"]
+# Receive a dictionary of case information from the framework
print("reading case_info")
+# Remove the following line in the final PR
+# os.environ["case_env_file"] = os.path.join(work_dir, "case_info.yml")
case_env_file = os.environ["case_env_file"]
-assert(os.path.isfile(case_env_file))
+assert os.path.isfile(case_env_file), f"case environment file {case_env_file} not found"
with open(case_env_file, 'r') as stream:
try:
case_info = yaml.safe_load(stream)
- # print(parsed_yaml)
except yaml.YAMLError as exc:
print(exc)
-# Sample case_info template ingested from yaml file ('case_info.yaml')
-# case_info = {
-# "CASENAME": {
-# "NAME": os.environ["CASENAME"],
-# "TAS_FILE": os.environ["TAS_FILE"],
-# "tas_var": os.environ["tas_var"],
-# "time_coord": os.environ["time_coord"],
-# "lon_coord": os.environ["lon_coord"],
-# },
-# "CASENAME1": {
-# "NAME": os.environ["CASENAME"],
-# "TAS_FILE": os.environ["TAS_FILE"],
-# "tas_var": os.environ["tas_var"],
-# "time_coord": os.environ["time_coord"],
-# "lon_coord": os.environ["lon_coord"],
-# },
-# }
-
-# Loop over cases and load datasets into a separate dict
-model_datasets = dict()
-for case_name, case_dict in case_info.items():
- ds = xr.open_dataset(case_dict["TAS_FILE"], use_cftime=True)
- model_datasets[case_name] = ds
- #print(ds)
+cat_def_file = case_info['CATALOG_FILE']
+case_list = case_info['CASE_LIST']
+# all cases share variable names and dimension coords, so just get first result for each
+tas_var = [case['tas_var'] for case in case_list.values()][0]
+time_coord = [case['time_coord'] for case in case_list.values()][0]
+lat_coord = [case['lat_coord'] for case in case_list.values()][0]
+lon_coord = [case['lon_coord'] for case in case_list.values()][0]
+# open the csv file using information provided by the catalog definition file
+cat = intake.open_esm_datastore(cat_def_file)
+# filter catalog by desired variable and output frequency
+tas_subset = cat.search(variable_id=tas_var, frequency="day")
+# examine assets for a specific file
+#tas_subset['CMIP.synthetic.day.r1i1p1f1.day.gr.atmos.r1i1p1f1.1980-01-01-1984-12-31'].df
+# convert tas_subset catalog to an xarray dataset dict
+tas_dict = tas_subset.to_dataset_dict(
+ xarray_open_kwargs={"decode_times": True, "use_cftime": True}
+)
# Part 2: Do some calculations (time and zonal means)
# ---------------------------------------------------
@@ -111,11 +107,12 @@
tas_arrays = {}
# Loop over cases
+for k, v in tas_dict.items():
+ # load the tas data for case k
+ arr = tas_dict[k][tas_var]
-for k, v in case_info.items():
# take the time mean
- arr = model_datasets[k][case_info[k]["tas_var"]]
- arr = arr.mean(dim=case_info[k]["time_coord"])
+ arr = arr.mean(dim=tas_dict[k][time_coord].name)
# this block shuffles the data to make this single case look more
# interesting. ** DELETE THIS ** once we test with real data
@@ -130,7 +127,7 @@
arr = arr - arr.mean()
# take the zonal mean
- arr = arr.mean(dim=case_info[k]["lon_coord"])
+ arr = arr.mean(dim=tas_dict[k][lon_coord].name)
tas_arrays[k] = arr
@@ -153,18 +150,16 @@
plt.title("Zonal Mean Surface Air Temperature Anomaly")
# save the plot in the right location
-work_dir = os.environ["WK_DIR"]
-assert os.path.isdir(f"{work_dir}/model/PS")
-plt.savefig(f"{work_dir}/model/PS/example_model_plot.eps", bbox_inches="tight")
-
-
-# Part 4: Clean up and close open file handles
-# --------------------------------------------
-
-_ = [x.close() for x in model_datasets.values()]
+assert os.path.isdir(f"{work_dir}/model/PS"), f'Assertion error: {work_dir}/model/PS not found'
+plt.savefig(f"{work_dir}/model/PS/example_multicase_plot.eps", bbox_inches="tight")
-# Part 5: Confirm POD executed sucessfully
+# Part 4: Close the catalog files and
+# release variable dict reference for garbage collection
+# ------------------------------------------------------
+cat.close()
+tas_dict = None
+# Part 5: Confirm POD executed successfully
# ----------------------------------------
print("Last log message by example_multicase POD: finished successfully!")
sys.exit(0)
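
The catalog-based loading above replaces the per-case xr.open_dataset loop. As a reference for POD developers, a condensed, self-contained sketch of the same intake-esm pattern; the catalog path and variable name below are placeholders rather than values from this PR:

    import intake

    # open an ESM-intake datastore from its catalog definition (json header + csv)
    cat = intake.open_esm_datastore("esm_catalog.json")  # placeholder path

    # filter the catalog down to daily output of the target variable
    subset = cat.search(variable_id="tas", frequency="day")

    # one xarray dataset per catalog key, decoded with cftime
    dsets = subset.to_dataset_dict(
        xarray_open_kwargs={"decode_times": True, "use_cftime": True}
    )
    for key, ds in dsets.items():
        print(key, ds["tas"].sizes)

    cat.close()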
diff --git a/diagnostics/example_multicase/example_multirun_demo.ipynb b/diagnostics/example_multicase/example_multirun_demo.ipynb
new file mode 100644
index 000000000..04493cd12
--- /dev/null
+++ b/diagnostics/example_multicase/example_multirun_demo.ipynb
@@ -0,0 +1,1424 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "2c1fb53d-9b9f-41cc-b36c-1beeb791f2dd",
+ "metadata": {},
+ "source": [
+ "# MDTF Example Diagnostic POD for Multiple Cases / Experiments\n",
+ "### Uses: Data catalogs\n",
+ "### Disclaimer: This notebook runs in debug mode with some hardcoded values; MDTF integration is underway. It shows how information flows from the user configuration files through the MDTF framework and into the POD, including figure generation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "0c4fbe60-f63d-432a-9166-95cd312ba442",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# ================================================================================\n",
+ "#\n",
+ "# This file is part of the Multicase Example Diagnostic POD of the MDTF code\n",
+ "# package (see mdtf/MDTF-diagnostics/LICENSE.txt)\n",
+ "#\n",
+ "# Example Diagnostic POD\n",
+ "#\n",
+ "# Last update: March 2024 (exploring intake-esm catalog in the MDTF framework and the POD)\n",
+ "#\n",
+ "# This example builds upon the single case `example` POD\n",
+ "# and illustrates how to design and implement a POD that uses multiple\n",
+ "# model source datasets. These can be the same experiment with different\n",
+ "# models, two different experiments from the same model, or two different\n",
+ "# time periods within the same simulation.\n",
+ "#\n",
+ "# Version & Contact info\n",
+ "#\n",
+ "# - Version/revision information: version 1.1 (Oct-2022)\n",
+ "# - Model Development Task Force Framework Team\n",
+ "#\n",
+ "# Open source copyright agreement\n",
+ "#\n",
+ "# The MDTF framework is distributed under the LGPLv3 license (see LICENSE.txt).\n",
+ "#\n",
+ "# Functionality\n",
+ "#\n",
+ "# Metadata associated with the different cases are passed from the\n",
+ "# framework to the POD via a yaml file (case_info.yaml) that the POD reads into a dictionary.\n",
+ "# The POD iterates over the case entries in the dictionary and opens the input datasets.\n",
+ "# The `tas` variable is extracted for each case and the time average is taken over the dataset.\n",
+ "# Anomalies are calculated relative to the global mean and then zonally-averaged. The resulting plot\n",
+ "# contains one line for each case.\n",
+ "#\n",
+ "# Required programming language and libraries\n",
+ "#\n",
+ "# * Python >= 3.10\n",
+ "# * xarray\n",
+ "# * matplotlib\n",
+ "# * intake\n",
+ "# * yaml\n",
+ "# * sys\n",
+ "# * os\n",
+ "# * numpy\n",
+ "#\n",
+ "# Required model output variables\n",
+ "#\n",
+ "# * tas - Surface (2-m) air temperature (CF: air_temperature)\n",
+ "#\n",
+ "# References\n",
+ "#\n",
+ "# Maloney, E. D, and Co-authors, 2019: Process-oriented evaluation of climate\n",
+ "# and weather forecasting models. BAMS, 100(9), 1665-1686,\n",
+ "# doi:10.1175/BAMS-D-18-0042.1."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e6c0cc5-28bd-4fa9-9210-730842c65a44",
+ "metadata": {},
+ "source": [
+ "## Import necessary packages"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "964df112-b4a8-40c5-b7d5-e96f65ddf084",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import modules used in the POD\n",
+ "import os\n",
+ "import matplotlib\n",
+ "\n",
+ "matplotlib.use(\"Agg\") # non-X windows backend\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline \n",
+ "import numpy as np\n",
+ "import intake\n",
+ "import sys\n",
+ "import yaml\n",
+ "import warnings\n",
+ "\n",
+ "warnings.filterwarnings(\"ignore\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "48be4b51-1eb6-437d-8868-7b71cb418454",
+ "metadata": {},
+ "source": [
+ "NOTE: This is exploratory work, so some paths are hardcoded here; the MDTF framework\n",
+ "will let us remove them once the corresponding feature is available."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39a51b87-e19b-4bc5-9d9d-adcf8dc4c6eb",
+ "metadata": {},
+ "source": [
+ "## Part 1: Read in case info (in the format (YAML) that MDTF generates for the run)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "844aed9c-5407-492f-97ee-aaa7f995fb7e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Debugging: remove following line in final PR\n",
+ "os.environ[\"WORK_DIR\"] = \"/nbhome/a1r/wkdir/example_multicase\"\n",
+ "os.environ[\"case_env_file\"] = \"/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/case_info.yaml\"\n",
+ "os.environ['CATALOG_FILE'] = \"/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json\" \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "cdd05af0-451b-440a-8551-a189d7d42bbe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Receive a dictionary of case information from the framework\n",
+ "case_env_file = os.environ[\"case_env_file\"]\n",
+ "assert(os.path.isfile(case_env_file)), f\"case environment file {case_env_file} not found\"\n",
+ "with open(case_env_file, 'r') as stream:\n",
+ " try:\n",
+ " case_info = yaml.safe_load(stream)\n",
+ " except yaml.YAMLError as exc:\n",
+ " print(exc)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "bce6e34a-507a-45a9-ad5f-99a3a6b9e180",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cat_def_file = case_info['CATALOG_FILE']\n",
+ "case_list = case_info['CASE_LIST']\n",
+ "# all cases share variable names and dimension coords, so just get first result for each\n",
+ "tas_var = [case['tas_var'] for case in case_list.values()][0]\n",
+ "time_coord = [case['time_coord'] for case in case_list.values()][0]\n",
+ "lat_coord = [case['lat_coord'] for case in case_list.values()][0]\n",
+ "lon_coord = [case['lon_coord'] for case in case_list.values()][0]\n",
+ "# open the csv file using information provided by the catalog definition file\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8492bc06-f196-4037-9a42-52791ef170be",
+ "metadata": {},
+ "source": [
+ "## What is in the data catalog? "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "e24a27f2-8a92-4793-af49-b11193866aec",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "esm_catalog_ESM4 catalog with 89 dataset(s) from 785 asset(s):\n"
+ ],
+ "text/plain": [
+ " activity_id institution_id source_id experiment_id \\\n",
+ "0 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "1 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "2 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "3 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "4 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ ".. ... ... ... ... \n",
+ "780 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "781 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "782 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "783 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "784 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "\n",
+ " frequency modeling_realm table_id member_id grid_label variable_id \\\n",
+ "0 3hr atmos_cmip NaN NaN NaN pr \n",
+ "1 3hr atmos_cmip NaN NaN NaN rlut \n",
+ "2 3hr atmos_cmip NaN NaN NaN pr \n",
+ "3 3hr atmos_cmip NaN NaN NaN rlut \n",
+ "4 3hr atmos_cmip NaN NaN NaN pr \n",
+ ".. ... ... ... ... ... ... \n",
+ "780 daily atmos_cmip NaN NaN NaN zg500 \n",
+ "781 daily atmos_cmip NaN NaN NaN hurs \n",
+ "782 daily atmos_cmip NaN NaN NaN huss \n",
+ "783 daily atmos_cmip NaN NaN NaN pr \n",
+ "784 daily atmos_cmip NaN NaN NaN psl \n",
+ "\n",
+ " temporal_subset chunk_freq grid_label.1 platform dimensions \\\n",
+ "0 NaN NaN NaN NaN NaN \n",
+ "1 NaN NaN NaN NaN NaN \n",
+ "2 NaN NaN NaN NaN NaN \n",
+ "3 NaN NaN NaN NaN NaN \n",
+ "4 NaN NaN NaN NaN NaN \n",
+ ".. ... ... ... ... ... \n",
+ "780 NaN NaN NaN NaN NaN \n",
+ "781 NaN NaN NaN NaN NaN \n",
+ "782 NaN NaN NaN NaN NaN \n",
+ "783 NaN NaN NaN NaN NaN \n",
+ "784 NaN NaN NaN NaN NaN \n",
+ "\n",
+ " cell_methods path \n",
+ "0 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "1 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "2 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "3 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "4 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ ".. ... ... \n",
+ "780 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "781 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "782 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "783 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "784 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "\n",
+ "[785 rows x 17 columns]"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "cat.df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "79536816-c505-4b2f-8666-68fcca991af1",
+ "metadata": {},
+ "source": [
+ "## Searching the catalog for daily tas output for my POD"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "cd754709-3dc7-4a4c-90c0-67a2f5672021",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "esm_catalog_ESM4 catalog with 2 dataset(s) from 13 asset(s):\n"
+ ],
+ "text/plain": [
+ " activity_id institution_id source_id experiment_id \\\n",
+ "0 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "1 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "2 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "3 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "4 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "5 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "6 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "7 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "8 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "9 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n",
+ "10 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "11 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "12 dev NaN NaN c384L65_am5f3b1r0_amip \n",
+ "\n",
+ " frequency modeling_realm table_id member_id grid_label variable_id \\\n",
+ "0 daily atmos_cmip NaN NaN NaN tas \n",
+ "1 daily atmos_cmip NaN NaN NaN tas \n",
+ "2 daily atmos_cmip NaN NaN NaN tas \n",
+ "3 daily atmos_cmip NaN NaN NaN tas \n",
+ "4 daily atmos_cmip NaN NaN NaN tas \n",
+ "5 daily atmos_cmip NaN NaN NaN tas \n",
+ "6 daily atmos_cmip NaN NaN NaN tas \n",
+ "7 daily atmos_cmip NaN NaN NaN tas \n",
+ "8 daily atmos_cmip NaN NaN NaN tas \n",
+ "9 daily atmos_cmip NaN NaN NaN tas \n",
+ "10 daily atmos_cmip NaN NaN NaN tas \n",
+ "11 daily atmos_cmip NaN NaN NaN tas \n",
+ "12 daily atmos_cmip NaN NaN NaN tas \n",
+ "\n",
+ " temporal_subset chunk_freq grid_label.1 platform dimensions \\\n",
+ "0 NaN NaN NaN NaN NaN \n",
+ "1 NaN NaN NaN NaN NaN \n",
+ "2 NaN NaN NaN NaN NaN \n",
+ "3 NaN NaN NaN NaN NaN \n",
+ "4 NaN NaN NaN NaN NaN \n",
+ "5 NaN NaN NaN NaN NaN \n",
+ "6 NaN NaN NaN NaN NaN \n",
+ "7 NaN NaN NaN NaN NaN \n",
+ "8 NaN NaN NaN NaN NaN \n",
+ "9 NaN NaN NaN NaN NaN \n",
+ "10 NaN NaN NaN NaN NaN \n",
+ "11 NaN NaN NaN NaN NaN \n",
+ "12 NaN NaN NaN NaN NaN \n",
+ "\n",
+ " cell_methods path \n",
+ "0 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "1 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "2 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "3 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "4 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "5 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "6 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "7 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "8 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "9 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n",
+ "10 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "11 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n",
+ "12 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... "
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "tas_subset.df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "0049f883-2870-4e45-bb98-75c200b0ed72",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "--> The keys in the returned dictionary of datasets are constructed as follows:\n",
+ "\t'experiment_id.frequency.modeling_realm.variable_id'\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "100.00% [2/2 04:06<00:00]\n",
+ " "
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "\n",
+ "# Part 3: Make a plot that contains results from each case\n",
+ "# --------------------------------------------------------\n",
+ "print(\"Let's plot!\")\n",
+ "print(\"--------------------------------------\")\n",
+ "\n",
+ "# set up the figure\n",
+ "fig = plt.figure(figsize=(12, 4))\n",
+ "ax = plt.subplot(1, 1, 1)\n",
+ "\n",
+ "# loop over cases\n",
+ "for k, v in tas_arrays.items():\n",
+ " v.plot(ax=ax, label=k)\n",
+ "\n",
+ "# add legend\n",
+ "plt.legend()\n",
+ "\n",
+ "# add title\n",
+ "plt.title(\"Zonal Mean Surface Air Temperature Anomaly\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "d97c51b5-4c3a-4242-913e-efba9b05d38d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f0dc40a5-ae0a-4cbd-bd25-f55462cfa46b",
+ "metadata": {},
+ "source": [
+ "## Save the plots in work directory "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "63556ef5-07dd-4cad-a050-08bbd9c15d4a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Last log message by example_multicase POD: finished successfully!\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "<Figure size 1200x400 with 1 Axes>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "\n",
+ "# save the plot in the right location\n",
+ "work_dir = os.environ[\"WORK_DIR\"]\n",
+ "assert os.path.isdir(f\"{work_dir}/model/PS\"), f'Assertion error: {work_dir}/model/PS not found'\n",
+ "\n",
+ "plt.savefig(f\"{work_dir}/model/PS/example_model_plot.eps\", bbox_inches=\"tight\")\n",
+ "\n",
+ "# Part 4: Close the catalog files and\n",
+ "# release variable dict reference for garbage collection\n",
+ "# ------------------------------------------------------\n",
+ "cat.close()\n",
+ "tas_dict = None\n",
+ "\n",
+ "\n",
+ "# Part 5: Confirm POD executed successfully\n",
+ "# ----------------------------------------\n",
+ "print(\"Last log message by example_multicase POD: finished successfully!\")\n",
+ "sys.exit(0)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ec735c6a-6761-47cd-92e4-6904e7e939eb",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "intake",
+ "language": "python",
+ "name": "intake"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
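
For reference, the reads in this notebook and in example_multicase.py imply a case_info file of the following shape. A hedged sketch only: the key names follow the code, while the case name and path are invented for illustration:

    import yaml

    case_info = yaml.safe_load("""
    CATALOG_FILE: /path/to/esm_catalog.json   # placeholder path
    CASE_LIST:
      CASE_A:                                 # hypothetical case name
        tas_var: tas
        time_coord: time
        lat_coord: lat
        lon_coord: lon
    """)
    assert case_info["CASE_LIST"]["CASE_A"]["tas_var"] == "tas"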
diff --git a/diagnostics/example_multicase/multirun_config_demo1.jsonc b/diagnostics/example_multicase/multirun_config_demo1.jsonc
new file mode 100644
index 000000000..b7fa95752
--- /dev/null
+++ b/diagnostics/example_multicase/multirun_config_demo1.jsonc
@@ -0,0 +1,89 @@
+{
+ "pod_list" : [
+ "example_multicase"
+ ],
+ "case_list":
+ {
+ "c384L65_am5f3b1r0_amip":
+ {
+ "model" : "test",
+ "convention" : "CMIP",
+ "startdate" : 19810101,
+ "enddate" : 19821231
+ },
+ "c384L65_am5f3b1r0_amip":
+ {
+ "model" : "test",
+ "convention" : "CMIP",
+ "startdate" : 19810101,
+ "enddate" : 19821231
+ }
+ },
+ // PATHS ---------------------------------------------------------------------
+ // Location of supporting data downloaded when the framework was installed.
+ // If a relative path is given, it's resolved relative to the MDTF-diagnostics
+ // code directory. Environment variables (eg, $HOME) can be referenced with a
+ // "$" and will be expanded to their current values when the framework runs.
+ // Full path to model data ESM-intake catalog header file
+ "DATA_CATALOG":"/home/a1r/github/MDTF-diagnostics/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json",
+
+ // Backwards compatibility
+ "MODEL_DATA_ROOT": "../mdtf_test_data",
+
+ // Parent directory containing observational data used by individual PODs.
+ "OBS_DATA_ROOT": "../inputdata/obs_data",
+
+ // Working directory.
+ "WORK_DIR": "../wkdir",
+
+ // Directory to write output. The results of each run of the framework will be
+ // put in a subdirectory of this directory. Defaults to WORK_DIR if blank.
+ "OUTPUT_DIR": "../wkdir",
+
+ // Location of the Anaconda/miniconda or micromamba installation to use for managing
+ // dependencies (path returned by running `[conda | micromamba] info`.) If empty,
+ // framework will attempt to determine location of system's conda installation.
+ "conda_root": "~/.local/bin",
+
+ // Directory containing the framework-specific conda environments. This should
+ // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
+ // blank, the framework will look for its environments in conda_root/envs
+ "conda_env_root": "~/miniconda3/envs",
+
+ // Path to micromamba executable if using micromamba
+ "micromamba_exe": "",
+
+ // SETTINGS ------------------------------------------------------------------
+ // Any command-line option recognized by the mdtf script
+ // can be set here, in the form "flag name": "desired setting".
+
+ // Settings affecting what output is generated:
+ // Set to true to run the preprocessor; default true:
+ "run_pp": true,
+
+ // Set to true to perform data translation; default false:
+ "translate_data": true,
+
+ // Set to true to have PODs save postscript figures in addition to bitmaps.
+ "save_ps": false,
+
+ // Set to true for files > 4 GB
+ "large_file": false,
+
+ // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs
+ // run to completion
+ "save_pp_data": true,
+
+ // Set to true to save HTML and bitmap plots in a .tar file.
+ "make_variab_tar": false,
+
+ // Generate html output for multiple figures per case
+ "make_multicase_figure_html": false,
+
+ // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved
+ // under a unique name.
+ "overwrite": false,
+ // List with custom preprocessing script(s) to run on data
+ // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository
+ "user_pp_scripts" : []
+}
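
These configuration files are jsonc ("JSON with comments"), so a plain json.loads call rejects them. Below is a minimal stand-in reader that strips full-line // comments before parsing; the MDTF framework ships its own parser, so this is only a sketch of the idea:

    import json
    import re

    def load_jsonc(path: str) -> dict:
        # drop full-line // comments; the demo configs only use that style
        with open(path) as f:
            text = re.sub(r'^\s*//.*$', '', f.read(), flags=re.MULTILINE)
        return json.loads(text)

    config = load_jsonc("multirun_config_demo1.jsonc")
    print(config["pod_list"], sorted(config["case_list"]))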
diff --git a/diagnostics/example_multicase/multirun_config_demo2.jsonc b/diagnostics/example_multicase/multirun_config_demo2.jsonc
new file mode 100644
index 000000000..5e2517b49
--- /dev/null
+++ b/diagnostics/example_multicase/multirun_config_demo2.jsonc
@@ -0,0 +1,90 @@
+{
+ "pod_list" : [
+ "example_multicase"
+ ],
+ "case_list" :
+ {
+ "c384L65_am5f3b1r0_amip":
+ {
+ "model" : "test",
+ "convention" : "CMIP",
+ "startdate" : 19810101,
+ "enddate" : 19821231
+ },
+ "c96L65_am5f3b1r0_pdclim1850F":
+ {
+ "model" : "test",
+ "convention" : "CMIP",
+ "startdate" : "00050101",
+ "enddate" : "00061231"
+ }
+ },
+ // PATHS ---------------------------------------------------------------------
+ // Location of supporting data downloaded when the framework was installed.
+ // If a relative path is given, it's resolved relative to the MDTF-diagnostics
+ // code directory. Environment variables (eg, $HOME) can be referenced with a
+ // "$" and will be expanded to their current values when the framework runs.
+ // Full path to model data ESM-intake catalog header file
+ "DATA_CATALOG": "/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json",
+
+ // Backwards compatibility
+ "MODEL_DATA_ROOT": "../mdtf_test_data",
+
+ // Parent directory containing observational data used by individual PODs.
+ "OBS_DATA_ROOT": "../inputdata/obs_data",
+
+ // Working directory.
+ "WORK_DIR": "../wkdir",
+
+ // Directory to write output. The results of each run of the framework will be
+ // put in a subdirectory of this directory. Defaults to WORKING_DIR if blank.
+ "OUTPUT_DIR": "../wkdir",
+
+ // Location of the Anaconda/miniconda or micromamba installation to use for managing
+ // dependencies (path returned by running `[conda | micromamba] info`.) If empty,
+ // framework will attempt to determine location of system's conda installation.
+ "conda_root": "~/.local/bin",
+
+
+ // Directory containing the framework-specific conda environments. This should
+ // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
+ // blank, the framework will look for its environments in conda_root/envs
+ "conda_env_root": "~/miniconda3/envs",
+
+ // Path to micromamba executable if using micromamba
+ "micromamba_exe": "",
+
+ // SETTINGS ------------------------------------------------------------------
+ // Any command-line option recognized by the mdtf script
+ // can be set here, in the form "flag name": "desired setting".
+
+ // Settings affecting what output is generated:
+ // Set to true to run the preprocessor; default true:
+ "run_pp": true,
+
+ // Set to true to perform data translation; default false:
+ "translate_data": true,
+
+ // Set to true to have PODs save postscript figures in addition to bitmaps.
+ "save_ps": false,
+
+ // Set to true for files > 4 GB
+ "large_file": false,
+
+ // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs
+ // run to completion
+ "save_pp_data": true,
+
+ // Set to true to save HTML and bitmap plots in a .tar file.
+ "make_variab_tar": false,
+
+ // Generate html output for multiple figures per case
+ "make_multicase_figure_html": false,
+
+ // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved
+ // under a unique name.
+ "overwrite": false,
+ // List with custom preprocessing script(s) to run on data
+ // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository
+ "user_pp_scripts" : []
+}
diff --git a/diagnostics/example_multicase/multirun_config_template.jsonc b/diagnostics/example_multicase/multirun_config_template.jsonc
index ad5a33d94..2e66b93ae 100644
--- a/diagnostics/example_multicase/multirun_config_template.jsonc
+++ b/diagnostics/example_multicase/multirun_config_template.jsonc
@@ -25,38 +25,41 @@
"pod_list" : [
"example_multicase"
],
- // Each CASENAME corresponds to a different simulation/output dataset
- "case_list" : [
- {
- "CASENAME" : "CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231",
- "model" : "test",
- "convention" : "CMIP",
- "FIRSTYR" : 1980,
- "LASTYR" : 1984
- },
- {
- "CASENAME": "CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231",
- "model" : "test",
- "convention" : "CMIP",
- "FIRSTYR" : 1985,
- "LASTYR" : 1989
- }
- ],
+ // Each case corresponds to a different simulation/output dataset
+ "case_list":
+ {
+ "CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231":
+ {
+ "model": "test",
+ "convention": "CMIP",
+ "startdate": "19800101",
+ "enddate": "19841231"
+ },
+ "CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231":
+ {
+ "model": "test",
+ "convention": "CMIP",
+ "startdate": "19850101",
+ "enddate": "19891231"
+ }
+ },
+
// PATHS ---------------------------------------------------------------------
// Location of supporting data downloaded when the framework was installed.
-
// If a relative path is given, it's resolved relative to the MDTF-diagnostics
// code directory. Environment variables (eg, $HOME) can be referenced with a
// "$" and will be expended to their current values when the framework runs.
+ // Full path to model data ESM-intake catalog header file
+ "DATA_CATALOG": "/net/jml/mdtf/MDTF-diagnostics/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json",
+
+ // Backwards compatibility
+ "MODEL_DATA_ROOT": "../inputdata/mdtf_test_data",
// Parent directory containing observational data used by individual PODs.
"OBS_DATA_ROOT": "../inputdata/obs_data",
- // Parent directory containing results from different models.
- "MODEL_DATA_ROOT": "../mdtf_test_data",
-
// Working directory.
- "WORKING_DIR": "../wkdir",
+ "WORK_DIR": "../wkdir",
// Directory to write output. The results of each run of the framework will be
// put in a subdirectory of this directory. Defaults to WORKING_DIR if blank.
@@ -65,56 +68,47 @@
// Location of the Anaconda/miniconda or micromamba installation to use for managing
// dependencies (path returned by running `[conda | micromamba] info`.) If empty,
// framework will attempt to determine location of system's conda installation.
- "conda_root": "",
+ "conda_root": "/net/jml/miniconda3",
// Directory containing the framework-specific conda environments. This should
// be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
- // blank, the framework will look for its environments in the system default
- // location.
- "conda_env_root": "",
+ // blank, the framework will look for its environments in conda_root/envs
+ "conda_env_root": "/net/jml/miniconda3/envs",
// Path to micromamba executable if using micromamba
"micromamba_exe": "",
+
// SETTINGS ------------------------------------------------------------------
// Any command-line option recognized by the mdtf script (type `mdtf --help`)
// can be set here, in the form "flag name": "desired setting".
- //
- // Type of data for the framework to process: `single_run` (default)
- // for PODs that compare output from a single simulation to observational data
- // or `multi_run` for PODs that analyze output from multiple simulations and/or
- // observational datasets
- "data_type": "multi_run",
-
- // Method used to fetch model data.
- "data_manager": "Local_File",
-
- // Method used to manage dependencies.
- "environment_manager": "Conda",
// Settings affecting what output is generated:
+ // Set to true to run the preprocessor; default true:
+ "run_pp": true,
+
+ // Set to true to perform data translation; default false:
+ "translate_data": true,
// Set to true to have PODs save postscript figures in addition to bitmaps.
"save_ps": false,
- // Set to true to have PODs save netCDF files of processed data.
- "save_nc": false,
+ // Set to true for files > 4 GB
+ "large_file": false,
+
+ // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs
+ // run to completion
+ "save_pp_data": true,
// Set to true to save HTML and bitmap plots in a .tar file.
"make_variab_tar": false,
+ // Generate HTML output for multiple figures per case
+ "make_multicase_figure_html": false,
+
// Set to true to overwrite results in OUTPUT_DIR; otherwise results saved
// under a unique name.
"overwrite": false,
-
- // Settings used in debugging:
-
- // Log verbosity level.
- "verbose": 1,
-
- // Set to true for framework test. Data is fetched but PODs are not run.
- "test_mode": false,
-
- // Set to true for framework test. No external commands are run and no remote
- // data is copied. Implies test_mode.
- "dry_run": false
+ // List of custom preprocessing script(s) to run on the data
+ // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository
+ "user_pp_scripts" : ["example_pp_script.py"]
}
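A rough sketch of what a script like the `example_pp_script.py` listed above could contain; the hook name and signature are invented for illustration, so the real interface described in the `user_scripts` documentation takes precedence.

```python
import xarray as xr

# Hypothetical hook: the framework is assumed to hand each case's dataset to
# the script and use the returned (possibly modified) dataset downstream.
def user_pp(ds: xr.Dataset) -> xr.Dataset:
    # Example transformation: wrap longitudes into [0, 360) and re-sort.
    if "lon" in ds.coords and float(ds["lon"].min()) < 0:
        ds = ds.assign_coords(lon=ds["lon"] % 360).sortby("lon")
    return ds
```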
diff --git a/diagnostics/example_multicase/settings.jsonc b/diagnostics/example_multicase/settings.jsonc
index f4f456bcb..13325ff6f 100644
--- a/diagnostics/example_multicase/settings.jsonc
+++ b/diagnostics/example_multicase/settings.jsonc
@@ -17,7 +17,7 @@
"description" : "Example diagnostic with multiple cases",
"driver" : "example_multicase.py",
"long_name" : "Example diagnostic with multiple cases",
- "realm" : "atmos",
+ "convention": "cmip",
"runtime_requirements": {
"python3": ["matplotlib", "xarray", "netCDF4"]
}
@@ -25,8 +25,16 @@
// Variable Coordinates
"dimensions": {
- "lat": {"standard_name": "latitude"},
- "lon": {"standard_name": "longitude"},
+ "lat": {
+ "standard_name": "latitude",
+ "units": "degrees_north",
+ "axis": "Y"
+ },
+ "lon": {
+ "standard_name": "longitude",
+ "units": "degrees_east",
+ "axis": "X"
+ },
"time": {"standard_name": "time"}
},
@@ -34,6 +42,7 @@
"varlist" : {
"tas": {
"frequency" : "day",
+ "realm": "atmos",
"dimensions": ["time", "lat", "lon"],
"modifier": "atmos_height",
"standard_name" : "air_temperature",
diff --git a/diagnostics/example_notebook/Test_Notebook.ipynb b/diagnostics/example_notebook/Test_Notebook.ipynb
new file mode 100644
index 000000000..e9816becc
--- /dev/null
+++ b/diagnostics/example_notebook/Test_Notebook.ipynb
@@ -0,0 +1,210 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "eed42503-3bd5-4804-96b3-83e685e46038",
+ "metadata": {},
+ "source": [
+ "# Test Notebook"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cc759e47-853d-4ddf-8395-ea977bf95625",
+ "metadata": {},
+ "source": [
+ "Tests the core MAR functionality."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5de0d861",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "9a00156b-27c4-4edc-b006-8b358ca52954",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import yaml"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7d1c2d10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "case_env_file = os.environ['case_env_file']\n",
+ "with open(case_env_file, 'r') as stream:\n",
+ " try:\n",
+ " case_info = yaml.safe_load(stream)\n",
+ " except yaml.YAMLError as exc:\n",
+ " print(exc)\n",
+ "\n",
+ "\n",
+ "cat_def_file = case_info['CATALOG_FILE']\n",
+ "case_list = case_info['CASE_LIST']\n",
+ "# all cases share variable names and dimension coords, so just get first result for each\n",
+ "tas_var = [case['tas_var'] for case in case_list.values()][0]\n",
+ "time_coord = [case['time_coord'] for case in case_list.values()][0]\n",
+ "lat_coord = [case['lat_coord'] for case in case_list.values()][0]\n",
+ "lon_coord = [case['lon_coord'] for case in case_list.values()][0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "06350542-190e-4e11-9868-39ac187acf10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config = {\n",
+ " \"dora_id\": \"odiv-2\",\n",
+ " \"pathPP\": \"/path/to/some/experiment\"\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "06d87e68-d4c5-4e85-b036-93da28e88d37",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for k, v in config.items():\n",
+ " config[k] = (\n",
+ " os.environ[f\"MAR_{k.upper()}\"]\n",
+ " if f\"MAR_{k.upper()}\" in os.environ.keys()\n",
+ " else v\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "41b887a3-977a-43b7-991f-4f84bad93c99",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'dora_id': 'odiv-2', 'pathPP': '/path/to/some/experiment'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(config)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "f5f3f13c-c770-43ec-a3a0-47e70d49489a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.linspace(1,100)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "51b5e7f6-941f-472e-98b2-a8413e9186e7",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Text(0.02, 0.9, 'odiv-2')"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAABV0UlEQVR4nO3dd3QV5f7+/fdO2+mBENIghNB7SEClKaIIIoII0sJRPCqPhVAOYMEGNsCGiqDH4/FgI4AFBBQLqDRRgRR6J0CAhNDSIH3fzx9+3T8jNZCwU67XWrMWM3PP7M/cbJnLuWf2WIwxBhEREZEKxMnRBYiIiIj8nQKKiIiIVDgKKCIiIlLhKKCIiIhIhaOAIiIiIhWOAoqIiIhUOAooIiIiUuEooIiIiEiF4+LoAi6HzWbjyJEj+Pj4YLFYHF2OiIiIXAJjDNnZ2YSGhuLkdOFrJJUyoBw5coSwsDBHlyEiIiKXISUlhbp1616wTaUMKD4+PsAfB+jr6+vgakRERORSZGVlERYWZj+PX0ilDCh/Duv4+voqoIiIiFQyl3J7hm6SFRERkQpHAUVEREQqHAUUERERqXAUUERERKTCUUApY/v378disZCUlOToUuQKrFixAovFQkZGhqNLERGplqpVQLn33nt54oknynR//fr1u+ztV6xYQUhICMYYbrzxRsaOHXtFtZTlsZWXDz/8kA4dOji6jIvq1KkTqamp+Pn5ObqUK/6eiYhURpXyMePLYbPZ+Oabb1i8eLGjS7FbvHgxffv2veJfw62Ix3Y+ixcv5o477nB0GRdUWFiIm5sbwcHBji5FRKT6MpVQZmamAUxmZuYlb7Nq1SoTGBhoiouLTXJysgHM3LlzTceOHY3VajUtWrQwP//8s719UVGRue+++0z9+vWNu7u7adKkiXnzzTft6ydNmmSAEtPPP/9s3/eXX35pbrzxRuPh4WHatGlj1q5de1ZNDRs2NF9//bUZPnz4WftKTk42xhizYsUKc8011xg3NzcTHBxsHn/8cVNYWHjeY8vPzzcjR440wcHBxmq1mvDwcDNlyhR72wMHDpi+ffsaLy8v4+PjYwYOHGjS0tJKHFdkZKT54IMPTFhYmPHy8jIPPfSQKSoqMi+//LIJCgoytWvXNi+++GKJGjIyMsyIESNM7dq1jY+Pj+nWrZtJSkoq0SY3N9d4eXmZLVu2GGOMyc/PN48++qgJDQ01np6e5tprr7X/HeTm5poWLVqYESNG2Lfft2+f8fX1Nf/5z3+MMcbMnj3b+Pn5mYULF5rGjRsbq9Vqunfvbg4ePFjicxcvXmyio6ON1Wo1ERERZvLkySX6EDDvvvuu6du3r/H09DTPPvus+fnnnw1gTp06VeKzlixZYpo0aWI8PDzMgAEDTE5Ojvnwww9NeHi4qVGjhomNjTVFRUX2fV/oGP+63++++840a9bMeHl5mZ49e5ojR45c8HsmIlIZleb8XW0CyoQJE8z9999vjDH2EFG3bl3zxRdfmG3btpkHHnjA+Pj4mOPHjxtjjCkoKDDPPvusWbdundm3b5/59NNPjaenp5k/f74xxpjs7GwzaNAgc+utt5rU1FSTmppq8vPz7ftu1qyZ+frrr83OnTvNXXfdZcLDw0ucFLds2WK8vLxMbm6uycjIMB07djQjRoyw76uoqMgcOnTIeHp6mkceecRs377dLFy40AQEBJhJkyad99heffVVExYWZlatWmX2799vVq9ebeLi4owxxthsNhMVFWW6dOliNmzYYH777TcTHR1tunbtat/XpEmTjLe3t7nrrrvM1q1bzeLFi42bm5vp2bOnGTVqlNmxY4f53//+ZwDz66+/2vfbuXNn06dPH7N+/Xqza9cuM378eFOrVi1z4sQJ+76//vpr07BhQ/t8TEyM6dSpk1m1apXZs2ePefXVV43VajW7du0yxhiTmJho3NzczMKFC01RUZHp3LmzueOOO+zbz54927i6upr27dubtWvXmg0bNphrr73WdOrUyd7mu+++M76+vubDDz80e/fuNT/88IOpX7++mTx5sr0NYAIDA80HH3xg9u7da/bv33/OgOLq6mpuueUWk5CQYFauXGlq1aplevToYQYNGmS2bt1qlixZYtzc3My8efMu+Rj/3G/37t3N+vXrTXx8vGnevLmJiYm54PdMRKQyKreAMmXKFNO+fXvj7e1tateube644w6zY8eOEm1sNpuZNGmSCQkJMe7u7qZr1672/2P+U15enomNjTW1atUynp6epk+fPiYlJeWS67icgNKkSROzePFiY8z/CyjTpk2zry8sLDR169Y1L7/88nn38cgjj5gBAwbY54cPH17ihPnXff/3v/+1L9u6dasBzPbt2+3LXnrpJdO/f3/7fNeuXc2YMWNK7OvJJ580TZs2NTabzb5s1qxZxtvb2xQXF5/z2EaNGmVuuummEtv86YcffjDOzs4lrjD8Wdu6deuMMX8EFE9PT5OVlWVv07NnT1O/fv0Sn9m0aVMzdepUY4wxP/74o/H19TV5eXklPq9hw4bmvffes8+PGDHCjBs3zhhjzJ49e4zFYjGHDx8usc3NN99sJk6caJ9/5ZVXTEBAgBk1apQJDg42x44ds6+bPXu2Acxvv/1mX7Z9+3YDmN9//90YY8z1119f4gqSMcZ88sknJiQkxD4PmLFjx5Zoc66AApg9e/bY2zz44IPG09PTZGdnl+irBx988JKP8Vz7nTVrlgkKCrLPn+t7JiJSGZXm/F2qm2RXrlzJyJEj+e2331i2bBlFRUX06NGD06dP29u88sorTJ8+nZkzZ7J+/XqCg4O55ZZbyM7OtrcZO3YsCxcuZN68eaxZs4acnBxuv/12iouLS1POJdu+fTuHDh2ie/fuJZZ37NjR/mcXFxfat2/P9u3b7cv+/e9/0759e2rXro23tzfvv/8+Bw8evKTPbNOmjf3PISEhAKSnp9uXLVq0iL59+1607o4dO5a4R6Vz587k5ORw6NChcx7bvffeS1JSEk2bNmX06NH88MMPJfYXFhZW4kWLLVq0oEaNGiWOu379+iXekxAUFESLFi1KvHkyKCjIfjzx8fHk5ORQq1YtvL297VNycjJ79+4F/niD5ZIlS+zHnJCQgDGGJk2alNhm5cqV9m0Axo8fT9OmTXn77beZPXs2AQEBJfroz7+3PzVr1qzE8cTHx/P888+X+IwRI0aQmprKmTNn7Nv9dR/n4+npScOGDUv0Qf369fH29j5nv1zqMf59vyEhISW+KyIi1VGpbpL97rvvSszPnj2bwMBA4uPjueGGGzDG8Oabb/LUU0/Rv39/AD766COCgoKIi4vjwQcfJDMzkw8++IBPPvnEflL99NNPCQsLY/ny5fTs2bOMDu3/Wbx4MbfccgseHh4XbftnGPjss8/417/+xeuvv07Hjh3x8fHh1Vdf5ffff7+kz3R1dT1rnzabDYC0tDQSEhLo3bv3BfdhjDnrBlpjTIl9/v3YoqOjSU5O5ttvv2X58uUMGjSI7t2788UXX5xzf+f6nL/
W/udnnWvZn8djs9kICQlhxYoVZ+27Ro0aAKxbt46CggK6dOli38bZ2Zn4+HicnZ1LbPPXE356ejo7d+7E2dmZ3bt3c+utt571Gec6pr/2+XPPPWf/Pv6Vu7u7/c9eXl5nrf+7y+mXSznGc+3jz79nEZHq6oqe4snMzATA398fgOTkZNLS0ujRo4e9jdVqpWvXrqxdu5YHH3yQ+Ph4CgsLS7QJDQ2lVatWrF279pwBJT8/n/z8fPt8VlZWqepctGgRDzzwwFnLf/vtN2644QYAioqKiI+PJzY2FoDVq1fTqVMnHnnkEXv7v/5fL4Cbm9tlXfVZvHgxHTt2LHE14Fz7atGiBV9++WWJALF27Vp8fHyoU6fOeY/N19eXwYMHM3jwYO666y5uvfVWTp48SYsWLTh48CApKSn2qyjbtm0jMzOT5s2bl/o4/hQdHU1aWhouLi7Ur1//nG0WLVpE79697SfqqKgoiouLSU9P5/rrrz/vvu+77z5atWrFiBEjuP/++7n55ptp0aKFfX1RUREbNmzg2muvBWDnzp1kZGTQrFkze207d+6kUaNGl318l+tSj/FiLvd7JiJyOfIKi3luyVai69VkYPuwi29QTi77d1CMMYwbN44uXbrQqlUr4I8rA/DHZe6/CgoKsq9LS0vDzc2NmjVrnrfN302dOhU/Pz/79NchiotJT09n/fr13H777WetmzVrFgsXLmTHjh2MHDmSU6dOcd999wHQqFEjNmzYwPfff8+uXbt45plnWL9+fYnt69evz6ZNm9i5cyfHjx+nsLDwkmo616O29evX5/fff2f//v0cP34cm83GI488QkpKCqNGjWLHjh0sWrSISZMmMW7cOJycnM55bG+88Qbz5s1jx44d7Nq1i88//5zg4GBq1KhB9+7dadOmDcOGDSMhIYF169Zxzz330LVr10sa4jif7t2707FjR/r168f333/P/v37Wbt2LU8//TQbNmw45zE3adKEYcOGcc8997BgwQKSk5NZv349L7/8MkuXLgX++Pv59ddf+fjjj4mJieGuu+5i2LBhFBQU2Pfj6urKqFGj+P3330lISOCf//wnHTp0sAeWZ599lo8//pjJkyezdetWtm/fzvz583n66acv+3gv1aUc46W43O+ZiEhp7UnPod+sX5i7LoXJi7eScabg4huVk8sOKLGxsWzatIm5c+eete5cwxIX+62PC7WZOHEimZmZ9iklJeWS61yyZAnXXXcdgYGBZ62bNm0aL7/8MpGRkaxevZpFixbZr2o89NBD9O/fn8GDB3Pddddx4sSJEldTAEaMGEHTpk3t96n88ssvF63n9OnT/Pjjj2fdfzJhwgScnZ1p0aIFtWvX5uDBg9SpU4elS5eybt06IiMjeeihh7j//vvtJ9dzHZu3tzcvv/wy7du355prrmH//v0sXboUJycnLBYLX331FTVr1uSGG26ge/fuNGjQgPnz519yf56LxWJh6dKl3HDDDdx33300adKEIUOGsH//foKCgti7dy979uw56+rY7Nmzueeee+z3mfTt25fff/+dsLAwduzYwaOPPso777xjD6SzZs0iIyODZ555xr4PT09PHn/8cWJiYujYsSMeHh7MmzfPvr5nz558/fXXLFu2jGuuuYYOHTowffp0wsPDr+iYL9WFjvFSXc73TESktL6MP0Sft9ewIy2bAG8r793dnhqebo4r6HLuwo2NjTV169Y1+/btK7F87969BjAJCQkllvft29fcc889xpg/nvgAzMmTJ0u0adOmjXn22Wcv6fNLcxdwnz59znoy588nbRITEy/p88rSl19+aZo3b14m+zrXsVVEr7/+uunVq1eZ7/fP3xAREZHLdzq/0Iz/LMmEP/61CX/8azP0P7+ao1m55fJZ5fYUjzGG2NhYFixYwE8//URERESJ9REREQQHB7Ns2TL7soKCAlauXEmnTp0AaNeuHa6uriXapKamsmXLFnubstSlSxeGDh1a5vu9XH9e4SgLFe3Yzqdu3bpMnDjR0WWIiMjf7DqazR0zf+GL+EM4WeBf3Zvwyf3XEejjfvGNy1mpbpIdOXIkcXFxLFq0CB8fH/s9I35+fnh4eGCxWBg7dixTpkyhcePGNG7cmClTpuDp6UlMTIy97f3338/48eOpVasW/v7+TJgwgdatW5/1GHBZeOyxx8p8n1firzcHX6mKdmznM2jQIEeXICIif2GM4bMNKUxavJW8QhuBPlbeGhJFx4a1HF2ancWYS3+e8Xz3iMyePZt7770X+OOgn3vuOd577z1OnTrFddddx6xZs+w30gLk5eXx6KOPEhcXR25uLjfffHOJew0uJisrCz8/PzIzM/H19b3U8kVERKq9nPwinl64ma+SjgBwfeMA3hjclgBva7l/dmnO36UKKBWFAoqIiEjpbTuSRWxcAvuOn8bZycK4W5rwcNeGODld2UtrL1Vpzt/V5m3GIiIi1ZUxhrh1B3luyTYKimwE+7rzdkwU19T3d3Rp56WAIiIiUoVl5xXyxILNfLMpFYBuTWvz+qC2+Hs58BHiS6CAIiIiUkVtOZzJyLgEDpw4g4uThcdubcoDXRpctSGdK3HZP9RW1a1YsQKLxUJGRgYAH374of29MiIiIhWZMYYPf0mm/ztrOXDiDHVqePDZQx35/264evebXCkFlEs0ePBgdu3adUX7mDp1Ktdccw0+Pj4EBgbSr18/du7cWUYVioiIQGZuIQ9/msDkJdsoKLZxS4sglo6+nuh6NS++cQWigHKJPDw8zvlz+aWxcuVKRo4cyW+//cayZcsoKiqiR48enD59uoyqFBGR6iwpJYPeM1bz3dY0XJ0tPHt7C/5zdzv8PF0vvnEFU20CSn5+PqNHjyYwMBB3d3e6dOlS4uV/S5cupUmTJnh4eNCtWzf2799fYvu/DvHs3LkTi8XCjh07SrSZPn069evX53xPbn/33Xfce++9tGzZksjISGbPns3BgweJj48v02MVEZHqxRjDf1fvY+C/13LoVC71/D358uFO3Ncl4qLvwquoqk1Aeeyxx/jyyy/56KOPSEhIoFGjRvTs2ZOTJ0+SkpJC//79ue2220hKSuKBBx7giSeeOO++mjZtSrt27ZgzZ06J5XFxccTExFzylyEzMxMAf/+K+5iXiIhUbBlnChjx8QZe/GY7hcWG21oH8/XoLrSpW8PRpV2RavEUz+nTp3n33Xf58MMP6dWrFwDvv/8+y5Yt44MPPuDUqVM0aNCAN954A4vFQtOmTdm8efMF35kzbNgwZs6cyQsvvADArl27iI+P5+OPP76kmowxjBs3ji5dupT4lV0REZFLFX/gJKPiEjmSmYebixPP9G7OPzqEV9qrJn9VLa6g7N27l8LCQjp37mxf5urqyrXXXsv27dvZvn07HTp0KPEX2rFjxwvuc8iQIRw4cIDffvsNgDlz5tC2bVtatGjB6tWr8fb2tk9/v9ICEBsby6ZNm5g7d24ZHaWIiFQXNpvh3RV7GfTebxzJzCMiwIuFj3Ti7o71q0Q4gWpyBeXPe0L+/pdmjMFisZ
z3npELCQkJoVu3bsTFxdGhQwfmzp3Lgw8+CED79u1JSkqytw0KCiqx7ahRo1i8eDGrVq2ibt26pf5sERGpvk7k5DP+842s2HkMgL6RoUzp3xpva9U6pVeLKyiNGjXCzc2NNWvW2JcVFhayYcMGmjdvTosWLexXQv709/lzGTZsGPPnz+fXX39l7969DBkyBPjjiZ9GjRrZJx8fH+CPQBQbG8uCBQv46aefiIiIKMOjFBGRqu73fSe4bcZqVuw8htXFian9W/PWkLZVLpxANbmC4uXlxcMPP8yjjz6Kv78/9erV45VXXuHMmTPcf//9ZGdn8/rrrzNu3DgefPBB4uPj+fDDDy+63/79+/Pwww/z8MMP061bN+rUqXPB9iNHjiQuLo5Fixbh4+NDWloaAH5+fnh4eJTFoYqISBVUbDO8u2IP05ftwmagYW0vZg2Lpllw1X1hbrW4ggIwbdo0BgwYwN133010dDR79uzh+++/p2bNmtSrV48vv/ySJUuWEBkZyb///W+mTJly0X36+vrSp08fNm7cyLBhwy7a/t133yUzM5Mbb7yRkJAQ+zR//vyyOEQREamCjmXnM/x/63jthz/CSf/oOiyO7VKlwwmAxVzODRgOVprXNYuIiFRWa/ccZ8z8JI5l5+Ph6szzd7RkYPswR5d12Upz/q4WQzwiIiKVSbHN8NaPu3n7p90YA02CvJkVE03jIB9Hl3bVKKCIiIhUIEez8hgzL5Hf9p0EYHD7MCb3bYmHm7ODK7u6FFBEREQqiFW7jvGv+UmcOF2Ap5szU+5sTb+oCz+AUVUpoIiIiDhYUbGNN5bv4p0VezEGmgX7MGtYNA1rezu6NIdRQBEREXGg1MxcRs9NZP3+UwAMu64ez9zeAnfX6jWk83cKKCIiIg7y8450xn2WxKkzhXhbXZjavzV9IkMdXVaFoIAiIiJylRUW23jt+528t2ofAC1DfZkVE039AC8HV1ZxKKCIiIhcRYdOnWHU3EQSD2YAcG+n+ky8rRlWl+o9pPN3CigiIiJXyQ9b03j0i01k5hbi4+7Cq3e14dZWIY4uq0JSQBERESlnBUU2pn27g//9kgxAZF0/ZsZEE+bv6eDKKi4FFBERkXKUcvIMsXEJbDyUCcADXSJ47NZmuLlUm9fhXRYFFBERkXLy7eZUHvtyE9l5Rfh5uPL6wEi6twhydFmVggKKiIhIGcsrLGbK0u18/OsBAKLr1eDtmGjq1PBwcGWVhwKKiIhIGdp//DQj4xLYeiQLgAe7NmBCj6a4OmtIpzQUUERERMrI4o1HeHLBZnLyi/D3cuP1QZF0axro6LIqJQUUERGRK5RXWMxzS7Yxd91BAK6t78+MoVEE+7k7uLLKSwFFRETkCuxJzyE2LoEdadlYLBDbrRFjbm6Mi4Z0rkipe2/VqlX06dOH0NBQLBYLX331VYn1FovlnNOrr75qb3PjjTeetX7IkCFXfDAiIiJX04KEQ/SduYYdadkEeLvx8X3XMr5HU4WTMlDqKyinT58mMjKSf/7znwwYMOCs9ampqSXmv/32W+6///6z2o4YMYLnn3/ePu/hoTubRUSkcjhTUMSkRVv5PP4QAB0b1OKtIW0J9NWQTlkpdUDp1asXvXr1Ou/64ODgEvOLFi2iW7duNGjQoMRyT0/Ps9qKiIhUdLuOZjNyTgK703NwssCYm5sQe1MjnJ0sji6tSinXa1BHjx7lm2++4f777z9r3Zw5cwgICKBly5ZMmDCB7Ozs8+4nPz+frKysEpOIiMjVZIzhs/Up9J25ht3pOdT2sfLpA9cxpntjhZNyUK43yX700Uf4+PjQv3//EsuHDRtGREQEwcHBbNmyhYkTJ7Jx40aWLVt2zv1MnTqV5557rjxLFREROa/T+UU8/dUWFiYeBuD6xgG8MbgtAd5WB1dWdVmMMeayN7ZYWLhwIf369Tvn+mbNmnHLLbfw9ttvX3A/8fHxtG/fnvj4eKKjo89an5+fT35+vn0+KyuLsLAwMjMz8fX1vdzyRURELmp7ahYj5ySw7/hpnCwwvkdTHu7aECddNSm1rKws/Pz8Lun8XW5XUFavXs3OnTuZP3/+RdtGR0fj6urK7t27zxlQrFYrVqtSqoiIXD3GGOLWHeS5JdsoKLIR7OvOjKFRXBvh7+jSqoVyCygffPAB7dq1IzIy8qJtt27dSmFhISEhIeVVjoiIyCXLzitk4oLNfL3pjydTuzWtzeuD2uLv5ebgyqqPUgeUnJwc9uzZY59PTk4mKSkJf39/6tWrB/xxCefzzz/n9ddfP2v7vXv3MmfOHG677TYCAgLYtm0b48ePJyoqis6dO1/BoYiIiFy5LYcziY1LYP+JM7g4WXjs1qY80KWBhnSuslIHlA0bNtCtWzf7/Lhx4wAYPnw4H374IQDz5s3DGMPQoUPP2t7NzY0ff/yRt956i5ycHMLCwujduzeTJk3C2dn5Mg9DRETkyhhj+PjXA7z0zXYKim3UqeHBjKFRtAuv6ejSqqUruknWUUpzk42IiMjFZOYW8sSXm/h2SxoAt7QI4tW72lDDU0M6ZalC3CQrIiJSGWxMySB2bgIpJ3NxdbYwsVdz/tm5PhaLhnQcSQFFRESqJWMM//tlP9O+3U5hsSHM34OZQ6OJDKvh6NIEBRQREamGMs4UMOHzTSzffhSAXq2CmTagDX4erg6uTP6kgCIiItVK/IFTjJ6byOGMXNycnXjm9ub8o0O4hnQqGAUUERGpFmw2w/ur9/Hq9zspshnq1/JkZkw0rer4Obo0OQcFFBERqfJOni5g/GdJ/LzzGAB9IkOZcmcrfNw1pFNRKaCIiEiVti75JKPnJpKWlYfVxYlJfVoy9NowDelUcAooIiJSJdlshndW7GH6sl3YDDSo7cWsmGiah+j3syoDBRQREalyjmXnM+6zJFbvPg5A/6g6vNCvFV5WnfYqC/1NiYhIlbJ2z3HGzE/iWHY+7q5OPH9HKwa2q6shnUpGAUVERKqEYpthxo+7mfHTboyBxoHevDMsmsZBPo4uTS6DAoqIiFR66Vl5jJmXxK/7TgAwqH1dnuvbCg83vYS2slJAERGRSm317mP8a34Sx3MK8HRz5qU7W3FnVF1HlyVXSAFFREQqpaJiG28u382sFXswBpoF+zAzJppGgd6OLk3KgAKKiIhUOqmZuYyZm8S6/ScBiLmuHs/e3gJ3Vw3pVBUKKCIiUqn8vCOdcZ8lcepMId5WF6b0b03fyFBHlyVlTAFFREQqhcJiG699v5P3Vu0DoFUdX2YOjaZ+gJeDK5PyoIAiIiIV3uGMXEbFJZBwMAOA4R3DebJ3c6wuGtKpqhRQRESkQlu27SgTPt9IZm4hPu4uvDKgDb1ahzi6LClnCigiIlIhFRTZePm7HXywJhmAyLp+zIyJJszf08GVydWggCIiIhVOyskzxMYlsPFQJgD3d4ng8Vub4ebi5ODK5GpRQBERkQrluy2pPPrFJrLzivDzcOW1gZHc0iLI0WXJVaaAIiIiFUJeYTFTl27no18PABBdrwZvx0RTp4aHgysTR1BAERERh
9t//DQj4xLYeiQLgAe7NmBCj6a4OmtIp7pSQBEREYdavPEITy7YTE5+ETU9XZk+qC3dmgU6uixxMAUUERFxiLzCYp7/ehtxvx8E4Nr6/rw1tC0hfhrSEQUUERFxgL3Hchg5J4EdadlYLPDIjQ35V/cmuGhIR/6PAoqIiFxVCxMP8dTCLZwpKCbA2403Brfl+sa1HV2WVDAKKCIiclXkFhQzafEWPttwCICODWrx1pC2BPq6O7gyqYgUUEREpNztPprNI3MS2J2eg8UCY25uzKibGuPsZHF0aVJBKaCIiEi5+nxDCs8s2kJeoY3aPlbeGtKWTg0DHF2WVHClvhtp1apV9OnTh9DQUCwWC1999VWJ9ffeey8Wi6XE1KFDhxJt8vPzGTVqFAEBAXh5edG3b18OHTp0RQciIiIVy+n8IsZ9lsSjX2wir9DG9Y0D+HbM9QoncklKHVBOnz5NZGQkM2fOPG+bW2+9ldTUVPu0dOnSEuvHjh3LwoULmTdvHmvWrCEnJ4fbb7+d4uLi0h+BiIhUODvSsug7cw0LEg7jZIFHezblo39eS4C31dGlSSVR6iGeXr160atXrwu2sVqtBAcHn3NdZmYmH3zwAZ988gndu3cH4NNPPyUsLIzly5fTs2fP0pYkIiIVhDGGeetTmLx4K/lFNoJ93ZkxNIprI/wdXZpUMuXywPmKFSsIDAykSZMmjBgxgvT0dPu6+Ph4CgsL6dGjh31ZaGgorVq1Yu3atefcX35+PllZWSUmERGpWLLzChk9L4mJCzaTX2SjW9PaLB1zvcKJXJYyv0m2V69eDBw4kPDwcJKTk3nmmWe46aabiI+Px2q1kpaWhpubGzVr1iyxXVBQEGlpaefc59SpU3nuuefKulQRESkjWw5nEhuXwP4TZ3B2svBYz6aMuL4BTnpKRy5TmQeUwYMH2//cqlUr2rdvT3h4ON988w39+/c/73bGGCyWc3+RJ06cyLhx4+zzWVlZhIWFlV3RIiJyWYwxfPrbAV74ejsFxTbq1PBgxtAo2oXXvPjGIhdQ7o8Zh4SEEB4ezu7duwEIDg6moKCAU6dOlbiKkp6eTqdOnc65D6vVitWqG6tERCqSzNxCJi7YxNLNf1z97t48iNcGtqGGp5uDK5OqoNxfenDixAlSUlIICQkBoF27dri6urJs2TJ7m9TUVLZs2XLegCIiIhXLpkMZ3P72apZuTsPV2cLTvZvz/j3tFE6kzJT6CkpOTg579uyxzycnJ5OUlIS/vz/+/v5MnjyZAQMGEBISwv79+3nyyScJCAjgzjvvBMDPz4/777+f8ePHU6tWLfz9/ZkwYQKtW7e2P9UjIiIVkzGG//2yn2nfbqew2FC3pgczY6JpG1bD0aVJFVPqgLJhwwa6detmn//z3pDhw4fz7rvvsnnzZj7++GMyMjIICQmhW7duzJ8/Hx8fH/s2b7zxBi4uLgwaNIjc3FxuvvlmPvzwQ5ydncvgkEREpDxknClgwuebWL79KAA9Wwbxyl2R+Hm4OrgyqYosxhjj6CJKKysrCz8/PzIzM/H19XV0OSIiVV78gVOMnpvI4Yxc3JydeKp3c+7pGH7ehxtEzqU052+9i0dERM7LZjO8v3ofr36/kyKbIbyWJ7NiomlVx8/RpUkVp4AiIiLndPJ0AeM/S+LnnccAuL1NCFP7t8bHXUM6Uv4UUERE5Czrkk8yem4iaVl5uLk4MblPS4ZeG6YhHblqFFBERMTOZjO8u3Iv05ftothmaBDgxaxh0TQP0f1+cnUpoIiICADHc/L51/wkVu8+DsCdUXV4sV8rvKw6VcjVp2+diIjw694TjJmXSHp2Pu6uTjzftxUD29fVkI44jAKKiEg1VmwzvP3Tbmb8uBubgcaB3swaFk2TIJ+LbyxSjhRQRESqqfTsPMbOS2Lt3hMADGxXl+fuaImnm04N4nj6FoqIVENrdh9n7PxEjucU4OnmzIv9WtE/uq6jyxKxU0AREalGioptvLl8N7NW7MEYaBbsw8yYaBoFeju6NJESFFBERKqJtMw8Rs9LZF3ySQBirqvHs7e3wN1V70GTikcBRUSkGlixM51xn23k5OkCvK0uTOnfmr6RoY4uS+S8FFBERKqwwmIbr/+wi3+v3AtAy1BfZsVEUz/Ay8GViVyYAoqISBV1OCOX0XMTiT9wCoDhHcOZeFtzDelIpaCAIiJSBS3fdpTxn28kM7cQH3cXXhnQhl6tQxxdlsglU0AREalCCopsvPLdDv67JhmAyLp+vD00mnq1PB1cmUjpKKCIiFQRKSfPEDs3kY0pGQDc1zmCJ3o1w83FybGFiVwGBRQRkSrguy1pPPrFRrLzivDzcOW1gZHc0iLI0WWJXDYFFBGRSiy/qJgp32zno18PABBVrwZvD42ibk0N6UjlpoAiIlJJ7T9+mti5CWw5nAXAg10bMKFHU1ydNaQjlZ8CiohIJfT1piM88eVmcvKLqOnpyvRBbenWLNDRZYmUGQUUEZFKJK+wmBe+3sac3w8CcE39mswYGkWIn4eDKxMpWwooIiKVxN5jOYyck8COtGwsFnjkxob8q3sTXDSkI1WQAoqISCXwVeJhnly4mTMFxdTycuONwW25oUltR5clUm4UUEREKrDcgmImL97K/A0pAHRo4M+MIVEE+ro7uDKR8qWAIiJSQe0+ms3IuAR2Hc3BYoHRNzVm9M2NcXayOLo0kXKngCIiUgF9viGFZxdtJbewmNo+Vt4a3JZOjQIcXZbIVaOAIiJSgZzOL+KZRVtYkHAYgC6NAnhjcFtq+1gdXJnI1aWAIiJSQexIy2LknAT2HjuNkwXG3dKER25shJOGdKQaUkAREXEwYwzz1qcwefFW8otsBPlamTEkiusa1HJ0aSIOo4AiIuJA2XmFPLlwC0s2HgGga5PaTB8USS1vDelI9aaAIiLiIFsOZxIbl8D+E2dwdrLwaM+m/H/XN9CQjghQ6p8fXLVqFX369CE0NBSLxcJXX31lX1dYWMjjjz9O69at8fLyIjQ0lHvuuYcjR46U2MeNN96IxWIpMQ0ZMuSKD0ZEpDIwxvDJr/vp/85a9p84Q6ifO5892IGHujZUOBH5P6UOKKdPnyYyMpKZM2eete7MmTMkJCTwzDPPkJCQwIIFC9i1axd9+/Y9q+2IESNITU21T++9997lHYGISCWSlVfIyLgEnlm0lYJiG92bB/LN6OtpF+7v6NJEKpRSD/H06tWLXr16nXOdn58fy5YtK7Hs7bff5tprr+XgwYPUq1fPvtzT05Pg4ODSfryISKW16VAGI+MSSDmZi4uThSd6NeP+LhFYLLpqIvJ35f6GqczMTCwWCzVq1CixfM6cOQQEBNCyZUsmTJhAdnb2efeRn59PVlZWiUlEpLIwxvC/NckMeHctKSdzqVvTgy8e7sQD1zdQOBE5j3K9STYvL48nnniCmJgYfH197cuHDRtGREQEwcHBbNmyhYkTJ7Jx48azrr78aerUqTz33HPlWaqISLnIPFPIo19s5IdtRwHo2TKIV+6KxM/D1cGViVRsFmOMueyNLRYWLlxI
v379zlpXWFjIwIEDOXjwICtWrCgRUP4uPj6e9u3bEx8fT3R09Fnr8/Pzyc/Pt89nZWURFhZGZmbmBfcrIuJICQdPMSoukcMZubg5O/FU7+bc0zFcV02k2srKysLPz++Szt/lcgWlsLCQQYMGkZyczE8//XTRIqKjo3F1dWX37t3nDChWqxWrVb8JICKVg81m+O+afbzy3U6KbIbwWp7MHBpN67p+ji5NpNIo84DyZzjZvXs3P//8M7VqXfyXELdu3UphYSEhISFlXY6IyFV16nQB4z/fyE870gHo3SaEaf1b4+OuIR2R0ih1QMnJyWHPnj32+eTkZJKSkvD39yc0NJS77rqLhIQEvv76a4qLi0lLSwPA398fNzc39u7dy5w5c7jtttsICAhg27ZtjB8/nqioKDp37lx2RyYicpWt33+S0XMTSc3Mw83FiUl9WhBzbT0N6YhchlLfg7JixQq6det21vLhw4czefJkIiIizrndzz//zI033khKSgr/+Mc/2LJlCzk5OYSFhdG7d28mTZqEv/+l/Q5AacawRETKm81meHflXqYv20WxzdAgwIuZMdG0CNW/TyJ/VZrz9xXdJOsoCigiUlEcz8ln3GcbWbXrGAB3RtXhxX6t8LLqTSIif+fwm2RFRKqD3/adYPTcRNKz83F3deL5vq0Y2L6uhnREyoACiohIKRXbDDN/2sNbP+7CZqBRoDfvDIumSZCPo0sTqTIUUERESiE9O49/zU/ilz0nALirXV2ev6Mlnm7651SkLOm/KBGRS/TLnuOMmZfE8Zx8PFydebFfKwa0q+voskSqJAUUEZGLKCq2MePH3bz98x6MgaZBPswaFk2jQG9HlyZSZSmgiIhcwNGsPEbNTWRd8kkAhl4bxrO3t8TDzdnBlYlUbQooIiLnsXLXMf41P4mTpwvwcnNmSv/W3NG2jqPLEqkWFFBERP6mqNjG68t28e6KvQC0CPFl1rBoIgK8HFyZSPWhgCIi8hdHMnIZPTeRDQdOAXB3h3Ce6t0cd1cN6YhcTQooIiL/58ftRxn/+UYyzhTiY3Xh5bvacFtrvcRUxBEUUESk2isosvHKdzv475pkANrU9WPm0Gjq1fJ0cGUi1ZcCiohUayknzzBqbiJJKRkA3Nc5gsd7NcXqoiEdEUdSQBGRauv7rWk8+vlGsvKK8HV34dWBkfRsGezoskQEBRQRqYbyi4qZunQHH67dD0BUvRq8PTSKujU1pCNSUSigiEi1cuDEaWLjEtl8OBOA/++GBjzasymuzk4OrkxE/koBRUSqjW82pfLEl5vIzi+ihqcr0wdFclOzIEeXJSLnoIAiIlVeXmExL36zjU9/OwhA+/CazBgaRWgNDwdXJiLno4AiIlXavmM5jIxLZHtqFgAP39iQcbc00ZCOSAWngCIiVdZXiYd5cuFmzhQUU8vLjemD29K1SW1HlyUil0ABRUSqnNyCYiYv3sr8DSkAXBfhz4yhUQT5uju4MhG5VAooIlKl7D6azci4BHYdzcFigVE3NWb0TY1w0ZCOSKWigCIiVcbnG1J4dtFWcguLCfC28taQtnRuFODoskTkMiigiEildzq/iGcWbWFBwmEAOjeqxRuD2xLooyEdkcpKAUVEKrUdaVmMnJPA3mOncbLAv7o34ZFujXB2sji6NBG5AgooIlIpGWOYvz6FSYu3kl9kI8jXyltDoujQoJajSxORMqCAIiKVTk5+EU8t3MyipCMAdG1Sm+mDIqnlbXVwZSJSVhRQRKRS2Xokk9i4RJKPn8bZycKEHk158IYGOGlIR6RKUUARkUrBGMOnvx/kha+3UVBkI8TPnbeHRtG+vr+jSxORcqCAIiIVXlZeIRO/3Mw3m1MBuLlZIK8NjKSml5uDKxOR8qKAIiIV2qZDGcTGJXLw5BlcnCw80asZ93eJwGLRkI5IVaaAIiIVkjGGD9fuZ8rS7RQWG+rU8GBmTBRR9Wo6ujQRuQpK/dvPq1atok+fPoSGhmKxWPjqq69KrDfGMHnyZEJDQ/Hw8ODGG29k69atJdrk5+czatQoAgIC8PLyom/fvhw6dOiKDkREqo7MM4U8+Ek8zy3ZRmGxoUeLIJaOvl7hRKQaKXVAOX36NJGRkcycOfOc61955RWmT5/OzJkzWb9+PcHBwdxyyy1kZ2fb24wdO5aFCxcyb9481qxZQ05ODrfffjvFxcWXfyQiUiUkHjzFbTNW88O2o7g6W5jUpwXv3d0OP09XR5cmIleRxRhjLntji4WFCxfSr18/4I+rJ6GhoYwdO5bHH38c+ONqSVBQEC+//DIPPvggmZmZ1K5dm08++YTBgwcDcOTIEcLCwli6dCk9e/a86OdmZWXh5+dHZmYmvr6+l1u+iFQgNpvhgzXJvPzdDopshnr+nsyMiaJN3RqOLk1Eykhpzt9l+nrP5ORk0tLS6NGjh32Z1Wqla9eurF27FoD4+HgKCwtLtAkNDaVVq1b2Nn+Xn59PVlZWiUlEqo5Tpwt44OMNvLR0O0U2w22tg/l6dBeFE5FqrEwDSlpaGgBBQUEllgcFBdnXpaWl4ebmRs2aNc/b5u+mTp2Kn5+ffQoLCyvLskXEgTbsP8ltM1bz04503FyceKFfK2bFROPrriEdkeqsTAPKn/7++J8x5qKPBF6ozcSJE8nMzLRPKSkpZVariDiGzWZ4Z8UeBv/nN1Iz84gI8GLhI524u0O4HiEWkbJ9zDg4OBj44ypJSEiIfXl6err9qkpwcDAFBQWcOnWqxFWU9PR0OnXqdM79Wq1WrFa9Y0OkqjiRk8+4zzayctcxAO5oG8pLd7bG26pfPhCRP5TpFZSIiAiCg4NZtmyZfVlBQQErV660h4927drh6upaok1qaipbtmw5b0ARkarjt30nuG3GalbuOobVxYmXB7TmzcFtFU5EpIRS/4uQk5PDnj177PPJyckkJSXh7+9PvXr1GDt2LFOmTKFx48Y0btyYKVOm4OnpSUxMDAB+fn7cf//9jB8/nlq1auHv78+ECRNo3bo13bt3L7sjE5EKpdhmmPXzHt5cvgubgYa1vXhnWDuaBvs4ujQRqYBKHVA2bNhAt27d7PPjxo0DYPjw4Xz44Yc89thj5Obm8sgjj3Dq1Cmuu+46fvjhB3x8/t8/Qm+88QYuLi4MGjSI3Nxcbr75Zj788EOcnZ3L4JBEpKJJz85j3PyNrNlzHIAB0XV5oV9LPN101UREzu2KfgfFUfQ7KCKVxy97jjNmXhLHc/LxcHXmhX6tuKtdXUeXJSIOUJrzt/73RUTKRbHN8NbyXbz98x6MgaZBPswaFkWjQA3piMjFKaCISJk7mpXH6LmJ/J58EoAh14QxqU9LPNw0jCsil0YBRUTK1Iqd6Yz7bCMnTxfg5ebMlP6tuaNtHUeXJSKVjAKKiJSJomIbry/bxbsr9gLQPMSXWTFRNKjt7eDKRKQyUkARkSt2JCOX0XMT2XDgFAB3dwjnqd7NcXfVkI6IXB4FFBG5Ij9uP8r4zzeScaYQH6sL0wa0oXebkItvKCJyAQooInJZCopsvPLdDv67JhmA1nX
8mBkTRXgtLwdXJiJVgQKKiJRayskzjJqbSFJKBgD/7FyfJ3o1w+qiIR0RKRsKKCJSKt9vTePRzzeSlVeEr7sLrw6MpGfLYEeXJSJVjAKKiFyS/KJipi7dwYdr9wPQNqwGbw+NIszf07GFiUiVpIAiIhd14MRpYuMS2Xw4E4AR10fwaM9muLmU6QvRRUTsFFBE5IK+2ZTKE19uIju/iBqerrw+MJKbmwc5uiwRqeIUUETknPIKi3nxm218+ttBANqH12TG0ChCa3g4uDIRqQ4UUETkLPuO5TAyLpHtqVkAPHJjQ8bd0gQXZw3piMjVoYAiIiUsSjrMkws2c7qgmFpebkwf3JauTWo7uiwRqWYUUEQEgNyCYp5bspV561MAuC7CnxlDowjydXdwZSJSHSmgiAh70rMZOSeRnUezsVhgVLdGjL65sYZ0RMRhFFBEqrkv4g/xzFdbyC0sJsDbyltD2tK5UYCjyxKRak4BRaSaOlNQxDNfbeXLhEMAdG5UizcGtyXQR0M6IuJ4Cigi1dDOtGwemRPP3mOncbLA2O5NGNmtEc5OFkeXJiICKKCIVCvGGOavT2HS4q3kF9kI8rXy1pAoOjSo5ejSRERKUEARqSZy8ot4auFmFiUdAeCGJrV5Y1AktbytDq5MRORsCigi1cDWI5mMiktk3/HTODtZGN+jCQ/d0BAnDemISAWlgCJShRlj+PT3g7zw9TYKimyE+LkzY2gU19T3d3RpIiIXpIAiUkVl5RUyccFmvtmUCsDNzQJ5bWAkNb3cHFyZiMjFKaCIVEGbD2UyMi6BgyfP4OJk4fFbm/HA9RFYLBrSEZHKQQFFpAoxxvDR2v1MWbqDgmIbdWp48HZMFNH1ajq6NBGRUlFAEakiMs8U8tiXG/l+61EAerQI4tW7IvHzdHVwZSIipaeAIlIFJB48xai5iRw6lYurs4Unb2vOvZ3qa0hHRCotBRSRSswYwwdrkpn27Q6KbIZ6/p7MjImiTd0aji5NROSKKKCIVFKnThcw4fON/LgjHYDbWgczbUAbfN01pCMilV+Zv0u9fv0/Liv/fRo5ciQA995771nrOnToUNZliFRp8QdO0nvGan7ckY6bixMv9GvFrJhohRMRqTLK/ArK+vXrKS4uts9v2bKFW265hYEDB9qX3XrrrcyePds+7+am32UQuRQ2m+G9Vft47YedFNsMEQFezIyJomWon6NLExEpU2UeUGrXrl1iftq0aTRs2JCuXbval1mtVoKDg8v6o0WqtBM5+Yz7bCMrdx0DoG9kKFP6t8bbqpFaEal6ynyI568KCgr49NNPue+++0o8TbBixQoCAwNp0qQJI0aMID09vTzLEKn0ft93gttmrGblrmNYXZyY2r81bw1pq3AiIlVWuf7r9tVXX5GRkcG9995rX9arVy8GDhxIeHg4ycnJPPPMM9x0003Ex8djtZ77rar5+fnk5+fb57OyssqzbJEKo9hmeOfnPbyxfBc2Aw1rezFrWDTNgn0dXZqISLmyGGNMee28Z8+euLm5sWTJkvO2SU1NJTw8nHnz5tG/f/9ztpk8eTLPPffcWcszMzPx9dU/1FI1HcvO51/zk1iz5zgAA6Lr8kK/lni66aqJiFROWVlZ+Pn5XdL5u9yGeA4cOMDy5ct54IEHLtguJCSE8PBwdu/efd42EydOJDMz0z6lpKSUdbkiFcove47T663VrNlzHA9XZ14bGMnrgyIVTkSk2ii3f+1mz55NYGAgvXv3vmC7EydOkJKSQkhIyHnbWK3W8w7/iFQlxTbDWz/u5u2fdmMMNA3yYWZMFI2DfBxdmojIVVUuAcVmszF79myGDx+Oi8v/+4icnBwmT57MgAEDCAkJYf/+/Tz55JMEBARw5513lkcpIpXG0aw8Rs9N5PfkkwAMuSaMSX1a4uHm7ODKRESuvnIJKMuXL+fgwYPcd999JZY7OzuzefNmPv74YzIyMggJCaFbt27Mnz8fHx/9H6JUXyt3HWPc/CROnC7Ay82ZKf1bc0fbOo4uS0TEYcr1JtnyUpqbbEQqsqJiG9OX7eKdFXsBaB7iy6yYKBrU9nZwZSIiZa8052/dcSfiIEcychk9N5ENB04B8I8O9Xi6dwvcXTWkIyKigCLiAD/tOMq4zzaScaYQb6sL0wa05vY2oY4uS0SkwlBAEbmKCottvPr9Tv6zah8Arev4MTMmivBaXg6uTESkYlFAEblKDp06Q2xcIkkpGQDc26k+E29rhtVFQzoiIn+ngCJyFXy/NY1HP99IVl4Rvu4uvHJXJLe20gszRUTORwFFpBwVFNmY+u12Zv+yH4DIsBrMHBpFmL+nYwsTEangFFBEysnBE2eInZvApkOZAIy4PoJHezbDzaVcXyIuIlIlKKCIlIOlm1N5/ItNZOcXUcPTldfuiqR7iyBHlyUiUmkooIiUobzCYl76Zjuf/HYAgHbhNZkxNIo6NTwcXJmISOWigCJSRpKPn2bknAS2pWYB8FDXhozv0QRXZw3piIiUlgKKSBlYlHSYJxds5nRBMf5ebrw+KJJuTQMdXZaISKWlgCJyBfIKi3luyVbmrksB4NoIf2YMiSLYz93BlYmIVG4KKCKXaU96DrFxCexIy8ZigdhujRhzc2NcNKQjInLFFFBELsOChEM8tXALuYXFBHhbeXNwW7o0DnB0WSIiVYYCikgpnCko4tlFW/ki/hAAnRrW4s0hbQn00ZCOiEhZUkARuUS7jmYzck4Cu9NzcLLAmJubEHtTI5ydLI4uTUSkylFAEbkIYwyfbUhh0uKt5BXaCPSx8taQKDo2rOXo0kREqiwFFJELyMkv4umFm/kq6QgA1zcO4I3BbQnwtjq4MhGRqk0BReQ8th3JIjYugX3HT+PsZGF8jyY8dENDnDSkIyJS7hRQRP7GGMOc3w/y/NfbKCiyEeLnzoyhUVxT39/RpYmIVBsKKCJ/kZ1XyMQFm/l6UyoANzUL5LWBkfh7uTm4MhGR6kUBReT/bD6USezcBA6cOIOLk4XHbm3KA10aaEhHRMQBFFCk2jPG8NHa/UxZuoOCYht1angwY2gU7cJrOro0EZFqSwFFqrXM3EIe/2IT321NA+CWFkG8dlckfp6uDq5MRKR6U0CRaispJYPYuAQOncrF1dnCxF7N+Wfn+lgsGtIREXE0BRSpdowxfLAmmWnf7qDIZgjz92Dm0Ggiw2o4ujQREfk/CihSrWScKWDC5xtZvj0dgNtaBzNtQBt83TWkIyJSkSigSLURf+Ako+ISOZKZh5uzE8/c3px/dAjXkI6ISAWkgCJVns1m+M/qfbz6/U6KbYb6tTyZGRNNqzp+ji5NRETOQwFFqrQTOfmM/3wjK3YeA6BPZChT7myFj4Z0REQqNAUUqbJ+33eC0fMSOZqVj9XFicl9WzLkmjAN6YiIVAIKKFLlFNsM7/y8hzeW78JmoEFtL2bFRNM8xNfRpYmIyCVyKusdTp48GYvFUmIKDg62rzfGMHnyZEJDQ/Hw8ODGG29k69atZV2GVFPHsvMZ/r91vL7sj3DSP6oOS2K7KJyIiFQyZR
5QAFq2bElqaqp92rx5s33dK6+8wvTp05k5cybr168nODiYW265hezs7PIoRaqRtXuO0+ut1azZcxwPV2devasN0we3xcuqC4UiIpVNufzL7eLiUuKqyZ+MMbz55ps89dRT9O/fH4CPPvqIoKAg4uLiePDBB8ujHKniim2Gt37czds/7cYYaBLkzayYaBoH+Ti6NBERuUzlcgVl9+7dhIaGEhERwZAhQ9i3bx8AycnJpKWl0aNHD3tbq9VK165dWbt27Xn3l5+fT1ZWVolJBOBoVh7D/vsbM378I5wMbh/GopFdFE5ERCq5Mg8o1113HR9//DHff/8977//PmlpaXTq1IkTJ06QlvbHC9mCgoJKbBMUFGRfdy5Tp07Fz8/PPoWFhZV12VIJrdp1jNveWs1v+07i6ebMm4Pb8vJdbfBwc3Z0aSIicoXKfIinV69e9j+3bt2ajh070rBhQz766CM6dOgAcNZjnsaYCz76OXHiRMaNG2efz8rKUkipxoqKbbyxfBfvrNiLMdA8xJdZMVE0qO3t6NJERKSMlPvdg15eXrRu3Zrdu3fTr18/ANLS0ggJCbG3SU9PP+uqyl9ZrVasVmt5lyqVQGpmLqPnJrJ+/ykAhl1Xj2dub4G7q66aiIhUJeVyD8pf5efns337dkJCQoiIiCA4OJhly5bZ1xcUFLBy5Uo6depU3qVIJffzjnRue2s16/efwtvqwsyYKF66s7XCiYhIFVTmV1AmTJhAnz59qFevHunp6bz44otkZWUxfPhwLBYLY8eOZcqUKTRu3JjGjRszZcoUPD09iYmJKetSpIooLLbx2vc7eW/VHzdbt6rjy6yYaMJreTm4MhERKS9lHlAOHTrE0KFDOX78OLVr16ZDhw789ttvhIeHA/DYY4+Rm5vLI488wqlTp7juuuv44Ycf8PHRUxdytsMZuYyKSyDhYAYA93aqz8TbmmF10VUTEZGqzGKMMY4uorSysrLw8/MjMzMTX1/9QmhVtWzbUSZ8vpHM3EJ83F149a423Noq5OIbiohIhVSa87d+YlMqnIIiG9O+3cH/fkkGIDKsBjOHRhHm7+ngykRE5GpRQJEKJeXkGWLjEth4KBOAB7pE8NitzXBzKff7uUVEpAJRQJEK49vNqTz25Say84rw83Dl9YGRdG9x/sfPRUSk6lJAEYfLKyxmytLtfPzrAQDahddkxtAo6tTwcHBlIiLiKAoo4lDJx08TG5fA1iN/vF/pwa4NmNCjKa7OGtIREanOFFDEYRZvPMKTCzaTk1+Ev5cbrw+KpFvTQEeXJSIiFYACilx1eYXFPLdkG3PXHQTg2vr+zBgaRbCfu4MrExGRikIBRa6qPek5xMYlsCMtG4sFYrs1YszNjXHRkI6IiPyFAopcNQsSDvH0V1s4U1BMgLcbbwxuy/WNazu6LBERqYAUUKTcnSkoYtKirXwefwiAjg1q8daQtgT6akhHRETOTQFFytWuo9mMnJPA7vQcnCww5uYmxN7UCGcni6NLExGRCkwBRcqFMYbP4w/x7KIt5BXaqO1jZcaQKDo2rOXo0kREpBJQQJEydzq/iKe/2sLCxMMAXN84gDcGtyXA2+rgykREpLJQQJEytT01i5FxCew7dhpnJwvjbmnCw10b4qQhHRERKQUFFCkTxhji1h3kuSXbKCiyEezrztsxUVxT39/RpYmISCWkgCJXLDuvkIkLNvP1plQAujWtzeuD2uLv5ebgykREpLJSQJErsuVwJrFxCew/cQYXJwuP3dqUB7o00JCOiIhcEQUUuSzGGD7+9QAvfbOdgmIbdWp4MGNoFO3Cazq6NBERqQIUUKTUMnMLefyLTXy3NQ2A7s2DeG1gG2p4akhHRETKhgKKlEpSSgaxcQkcOpWLq7OFib2a88/O9bFYNKQjIiJlRwFFLokxhg/WJPPydzsoLDaE+Xswc2g0kWE1HF2aiIhUQQooclEZZwqY8Pkmlm8/CkCvVsFMG9AGPw9XB1cmIiJVlQKKXFD8gVOMikvgSGYebs5OPH17c+7uEK4hHRERKVcKKHJONpvhP6v38er3Oym2GerX8mRmTDSt6vg5ujQREakGFFDkLCdPFzDusyRW7DwGQJ/IUKbc2Qofdw3piIjI1aGAIiWsSz7J6LmJpGXlYXVxYnLflgy5JkxDOiIiclUpoAjwx5DOOyv2MH3ZLmwGGtT2YlZMNM1DfB1dmoiIVEMKKMKx7HzGfZbE6t3HAegfVYcX+rXCy6qvh4iIOIbOQNXc2r3HGTMviWPZ+bi7OvHCHa0Y2D7M0WWJiEg1p4BSTRXbDG//tJsZP+7GZqBJkDezYqJpHOTj6NJEREQUUKqj9Kw8xsxL4td9JwAY1L4uz/VthYebs4MrExER+YNTWe9w6tSpXHPNNfj4+BAYGEi/fv3YuXNniTb33nsvFoulxNShQ4eyLkXOYfXuY9w2YzW/7juBp5szbwyO5JW7IhVORESkQinzKygrV65k5MiRXHPNNRQVFfHUU0/Ro0cPtm3bhpeXl73drbfeyuzZs+3zbm56E255Kiq28eby3cxasQdjoFmwD7OGRdOwtrejSxMRETlLmQeU7777rsT87NmzCQwMJD4+nhtuuMG+3Gq1EhwcXNYfL+eQmpnLmLlJrNt/EoCY6+rx7O0tcHfVVRMREamYyv0elMzMTAD8/f1LLF+xYgWBgYHUqFGDrl278tJLLxEYGHjOfeTn55Ofn2+fz8rKKr+Cq5ifd6Qz7rMkTp0pxNvqwpT+rekbGeroskRERC7IYowx5bVzYwx33HEHp06dYvXq1fbl8+fPx9vbm/DwcJKTk3nmmWcoKioiPj4eq9V61n4mT57Mc889d9byzMxMfH31Q2LnUlhs47Xvd/Leqn0AtAz1ZVZMNPUDvC6ypYiISPnIysrCz8/vks7f5RpQRo4cyTfffMOaNWuoW7fuedulpqYSHh7OvHnz6N+//1nrz3UFJSwsTAHlPA5n5DIqLoGEgxkADO8YzsTbmmtIR0REHKo0AaXchnhGjRrF4sWLWbVq1QXDCUBISAjh4eHs3r37nOutVus5r6zI2ZZtO8qEzzeSmVuIj7sLrwxoQ6/WIY4uS0REpFTKPKAYYxg1ahQLFy5kxYoVREREXHSbEydOkJKSQkiITqSXq6DIxrRvd/C/X5IBiKzrx8yYaML8PR1cmYiISOmVeUAZOXIkcXFxLFq0CB8fH9LS0gDw8/PDw8ODnJwcJk+ezIABAwgJCWH//v08+eSTBAQEcOedd5Z1OdVCyskzxMYlsPHQHzck398lgsdvbYabS5n/zI2IiMhVUeb3oFgslnMunz17Nvfeey+5ubn069ePxMREMjIyCAkJoVu3brzwwguEhV3aO2BKM4ZV1X23JZVHv9hEdl4Rfh6uvDYwkltaBDm6LBERkbM49B6Ui+UdDw8Pvv/++7L+2Gonr7CYqUu389GvBwCIrleDGUOjqFtTQzoiIlL56V08ldD+46cZGZfA1iN//B7Mg10bMKFHU1ydNaQjIiJVgwJKJbNk4xEmLthMTn4RNT1dmT6oL
d2anfsH7kRERCorBZRKIq+wmOe/3kbc7wcBuLa+P28NbUuIn4eDKxMRESl7CiiVwN5jOYyck8COtGwsFhh5YyPGdm+Mi4Z0RESkilJAqeAWJh7iqYVbOFNQTIC3G28Mbsv1jWs7uiwREZFypYBSQeUWFDNp8RY+23AIgI4NavHWkLYE+ro7uDIREZHyp4BSAe06ms3IOQnsTs/BYoExNzdm1E2NcXY692/MiIiIVDUKKBWIMYbP4w/x7KIt5BXaqO1j5a0hbenUMMDRpYmIiFxVCigVxOn8Ip75agsLEg8DcH3jAN4Y3JYAb70kUUREqh8FlApge2oWI+MS2HfsNE4WGN+jKQ93bYiThnRERKSaUkBxIGMMc9elMHnJVgqKbAT7ujNjaBTXRvg7ujQRERGHUkBxkOy8Qp5cuIUlG48AcGPT2kwf1BZ/LzcHVyYiIuJ4CigOsOVwJrFxCew/cQZnJwuP9mzK/3d9Aw3piIiI/B8FlKvIGMMnvx3gxa+3U1Bso04ND2YMjaJdeE1HlyYiIlKhKKBcJZm5hUxcsImlm9MA6N48iNcGtqGGp4Z0RERE/k4B5SrYmJJB7NwEUk7m4ups4Ylezbmvc30sFg3piIiInIsCSjkyxjD7l/1M/XY7hcWGMH8PZg6NJjKshqNLExERqdAUUMpJ5plCHv1iIz9sOwpAr1bBTBvQBj8PVwdXJiIiUvEpoJSDhIOnGBWXyOGMXNycnXj69ubc3SFcQzoiIiKXSAGlDNlshv+u2ccr3+2kyGaoX8uTmTHRtKrj5+jSREREKhUFlDJy8nQBEz7fyE870gHoExnKlDtb4eOuIR0REZHSUkApA+v3n2RUXCJpWXm4uTgxuU9Lhl4bpiEdERGRy6SAcgVsNsO7K/cyfdkuim2GBrW9mBUTTfMQX0eXJiIiUqkpoFym4zn5/Gt+Eqt3Hwfgzqg6vNivFV5WdamIiMiV0tn0Mvy69wRj5iWSnp2Pu6sTz/dtxcD2dTWkIyIiUkYUUEqh2GZ4+6fdzPhxNzYDjQO9mTUsmiZBPo4uTUREpEpRQLlE6dl5jJ2XxNq9JwC4q11dnr+jJZ5u6kIREZGyprPrJViz+zhj5ydyPKcATzdnXuzXiv7RdR1dloiISJWlgHIBRcU23ly+m1kr9mAMNAv2YWZMNI0CvR1dmoiISJWmgHIeaZl5jJ6byLr9JwEYem09JvVpgburs4MrExERqfoUUM7h553pjP9sIydPF+Dl5szUAW3oGxnq6LJERESqDQWUvygstvHaDzt5b+U+AFqG+jIzJpqIAC8HVyYiIlK9ODnyw9955x0iIiJwd3enXbt2rF692pHl8OP2o/ZwMrxjOF8+3EnhRERExAEcdgVl/vz5jB07lnfeeYfOnTvz3nvv0atXL7Zt20a9evUcUlPPlsH8o0M9OjcMoFfrEIfUICIiImAxxhhHfPB1111HdHQ07777rn1Z8+bN6devH1OnTr3gtllZWfj5+ZGZmYmvr957IyIiUhmU5vztkCGegoIC4uPj6dGjR4nlPXr0YO3atWe1z8/PJysrq8QkIiIiVZdDAsrx48cpLi4mKCioxPKgoCDS0tLOaj916lT8/PzsU1hY2NUqVURERBzAoTfJ/v3lesaYc75wb+LEiWRmZtqnlJSUq1WiiIiIOIBDbpINCAjA2dn5rKsl6enpZ11VAbBarVit1qtVnoiIiDiYQ66guLm50a5dO5YtW1Zi+bJly+jUqZMjShIREZEKxGGPGY8bN467776b9u3b07FjR/7zn/9w8OBBHnroIUeVJCIiIhWEwwLK4MGDOXHiBM8//zypqam0atWKpUuXEh4e7qiSREREpIJw2O+gXAn9DoqIiEjlU+F/B0VERETkQhRQREREpMJRQBEREZEKRwFFREREKhwFFBEREalwHPaY8ZX488EjvTRQRESk8vjzvH0pDxBXyoCSnZ0NoJcGioiIVELZ2dn4+fldsE2l/B0Um83GkSNH8PHxOefLBS9VVlYWYWFhpKSk6PdUypn6+upSf1896uurR3199ZRXXxtjyM7OJjQ0FCenC99lUimvoDg5OVG3bt0y25+vr6++7FeJ+vrqUn9fPerrq0d9ffWUR19f7MrJn3STrIiIiFQ4CigiIiJS4VTrgGK1Wpk0aRJWq9XRpVR56uurS/199aivrx719dVTEfq6Ut4kKyIiIlVbtb6CIiIiIhWTAoqIiIhUOAooIiIiUuEooIiIiEiFU60DyjvvvENERATu7u60a9eO1atXO7qkSm/q1Klcc801+Pj4EBgYSL9+/di5c2eJNsYYJk+eTGhoKB4eHtx4441s3brVQRVXDVOnTsVisTB27Fj7MvVz2Tp8+DD/+Mc/qFWrFp6enrRt25b4+Hj7evV32SgqKuLpp58mIiICDw8PGjRowPPPP4/NZrO3UV9fnlWrVtGnTx9CQ0OxWCx89dVXJdZfSr/m5+czatQoAgIC8PLyom/fvhw6dKh8CjbV1Lx584yrq6t5//33zbZt28yYMWOMl5eXOXDggKNLq9R69uxpZs+ebbZs2WKSkpJM7969Tb169UxOTo69zbRp04yPj4/58ssvzebNm83gwYNNSEiIycrKcmDllde6detM/fr1TZs2bcyYMWPsy9XPZefkyZMmPDzc3Hvvveb33383ycnJZvny5WbPnj32NurvsvHiiy+aWrVqma+//tokJyebzz//3Hh7e5s333zT3kZ9fXmWLl1qnnrqKfPll18awCxcuLDE+kvp14ceesjUqVPHLFu2zCQkJJhu3bqZyMhIU1RUVOb1VtuAcu2115qHHnqoxLJmzZqZJ554wkEVVU3p6ekGMCtXrjTGGGOz2UxwcLCZNm2avU1eXp7x8/Mz//73vx1VZqWVnZ1tGjdubJYtW2a6du1qDyjq57L1+OOPmy5dupx3vfq77PTu3dvcd999JZb179/f/OMf/zDGqK/Lyt8DyqX0a0ZGhnF1dTXz5s2ztzl8+LBxcnIy3333XZnXWC2HeAoKCoiPj6dHjx4llvfo0YO1a9c6qKqqKTMzEwB/f38AkpOTSUtLK9H3VquVrl27qu8vw8iRI+nduzfdu3cvsVz9XLYWL15M+/btGThwIIGBgURFRfH+++/b16u/y06XLl348ccf2bVrFwAbN25kzZo13HbbbYD6urxcSr/Gx8dTWFhYok1oaCitWrUql76vlC8LvFLHjx+nuLiYoKCgEsuDgoJIS0tzUFVVjzGGcePG0aVLF1q1agVg799z9f2BAweueo2V2bx580hISGD9+vVnrVM/l619+/bx7rvvMm7cOJ588knWrVvH6NGjsVqt3HPPPervMvT444+TmZlJs2bNcHZ2pri4mJdeeomhQ4cC+m6Xl0vp17S0NNzc3KhZs+ZZbcrj3FktA8qfLBZLiXljzFnL5PLFxsayadMm1qxZc9Y69f2VSUlJYcyYMfzwww+4u7uft536uWzYbDbat2/PlClTAIiKimLr1q28++673HPPPfZ26u8rN3/+fD799FPi4uJo2bIlSUlJ
jB07ltDQUIYPH25vp74uH5fTr+XV99VyiCcgIABnZ+ezEl96evpZ6VEuz6hRo1i8eDE///wzdevWtS8PDg4GUN9fofj4eNLT02nXrh0uLi64uLiwcuVKZsyYgYuLi70v1c9lIyQkhBYtWpRY1rx5cw4ePAjoe12WHn30UZ544gmGDBlC69atufvuu/nXv/7F1KlTAfV1ebmUfg0ODqagoIBTp06dt01ZqpYBxc3NjXbt2rFs2bISy5ctW0anTp0cVFXVYIwhNjaWBQsW8NNPPxEREVFifUREBMHBwSX6vqCggJUrV6rvS+Hmm29m8+bNJCUl2af27dszbNgwkpKSaNCggfq5DHXu3Pmsx+V37dpFeHg4oO91WTpz5gxOTiVPTc7OzvbHjNXX5eNS+rVdu3a4urqWaJOamsqWLVvKp+/L/LbbSuLPx4w/+OADs23bNjN27Fjj5eVl9u/f7+jSKrWHH37Y+Pn5mRUrVpjU1FT7dObMGXubadOmGT8/P7NgwQKzefNmM3ToUD0iWAb++hSPMernsrRu3Trj4uJiXnrpJbN7924zZ84c4+npaT799FN7G/V32Rg+fLipU6eO/THjBQsWmICAAPPYY4/Z26ivL092drZJTEw0iYmJBjDTp083iYmJ9p/XuJR+feihh0zdunXN8uXLTUJCgrnpppv0mHF5mDVrlgkPDzdubm4mOjra/iisXD7gnNPs2bPtbWw2m5k0aZIJDg42VqvV3HDDDWbz5s2OK7qK+HtAUT+XrSVLlphWrVoZq9VqmjVrZv7zn/+UWK/+LhtZWVlmzJgxpl69esbd3d00aNDAPPXUUyY/P9/eRn19eX7++edz/vs8fPhwY8yl9Wtubq6JjY01/v7+xsPDw9x+++3m4MGD5VKvxRhjyv66jIiIiMjlq5b3oIiIiEjFpoAiIiIiFY4CioiIiFQ4CigiIiJS4SigiIiISIWjgCIiIiIVjgKKiIiIVDgKKCIiIlLhKKCIiIhIhaOAIiIiIhWOAoqIiIhUOAooIiIiUuH8/1aQZO/CBxvMAAAAAElFTkSuQmCC",
+ "text/plain": [
+ " "
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fig1 = plt.figure(label=\"first_figure\")\n",
+ "ax = plt.subplot(1,1,1)\n",
+ "ax.plot(x,x*2)\n",
+ "ax.text(0.02,0.95,config[\"pathPP\"],ha=\"left\",transform=ax.transAxes)\n",
+ "ax.text(0.02,0.90,config[\"dora_id\"],ha=\"left\",transform=ax.transAxes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "7943ebef-004c-4b1e-b71a-ad4e6fa6c3f3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[ "
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "fig2 = plt.figure(label=\"second_figure\")\n",
+ "ax = plt.subplot(1,1,1)\n",
+ "ax.plot(x,np.sqrt(x))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
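The notebook above reads its inputs from the YAML file named by `case_env_file`; based on the keys it accesses (`CATALOG_FILE`, `CASE_LIST`, and the per-case `*_var`/`*_coord` entries), that file plausibly looks like the following (values illustrative, not part of the patch):

```python
import yaml

# Assumed shape of the file named by $case_env_file, inferred from the keys
# the notebook reads; values are illustrative.
case_info = yaml.safe_load("""
CATALOG_FILE: /path/to/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json
CASE_LIST:
  CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231:
    tas_var: tas
    time_coord: time
    lat_coord: lat
    lon_coord: lon
""")
print(case_info["CATALOG_FILE"])
print(next(iter(case_info["CASE_LIST"].values()))["tas_var"])  # -> tas
```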
diff --git a/diagnostics/forcing_feedback/forcing_feedback.py b/diagnostics/forcing_feedback/forcing_feedback.py
index 23b968bf0..d8c8baf9e 100644
--- a/diagnostics/forcing_feedback/forcing_feedback.py
+++ b/diagnostics/forcing_feedback/forcing_feedback.py
@@ -59,7 +59,7 @@
try:
os.system("python " + os.environ["POD_HOME"] + "/" + "forcing_feedback_kernelcalcs.py")
- print('Working Directory is ' + os.environ['WK_DIR'])
+ print('Working Directory is ' + os.environ['WORK_DIR'])
print('Forcing Feedback POD is executing')
except RuntimeError as e1:
print('WARNING', e1.errno, e1.strerror)
diff --git a/diagnostics/forcing_feedback/forcing_feedback_plot.py b/diagnostics/forcing_feedback/forcing_feedback_plot.py
index 865c46def..e1d5cf2f6 100644
--- a/diagnostics/forcing_feedback/forcing_feedback_plot.py
+++ b/diagnostics/forcing_feedback/forcing_feedback_plot.py
@@ -35,18 +35,18 @@
# Read in model results
-nc_pl = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_Planck.nc")
-nc_lr = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LapseRate.nc")
-nc_lw_q = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_WaterVapor.nc")
-nc_sw_q = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_WaterVapor.nc")
-nc_alb = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SfcAlbedo.nc")
-nc_lw_c = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_Cloud.nc")
-nc_sw_c = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_Cloud.nc")
-nc_lw_irf = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_IRF.nc")
-nc_sw_irf = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_IRF.nc")
-nc_lw_netrad = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_Rad.nc")
-nc_sw_netrad = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_Rad.nc")
-nc_strat = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_StratFB.nc")
+nc_pl = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_Planck.nc")
+nc_lr = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LapseRate.nc")
+nc_lw_q = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_WaterVapor.nc")
+nc_sw_q = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_WaterVapor.nc")
+nc_alb = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SfcAlbedo.nc")
+nc_lw_c = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_Cloud.nc")
+nc_sw_c = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_Cloud.nc")
+nc_lw_irf = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_IRF.nc")
+nc_sw_irf = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_IRF.nc")
+nc_lw_netrad = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_Rad.nc")
+nc_sw_netrad = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_Rad.nc")
+nc_strat = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_StratFB.nc")
lat_model = nc_sw_irf.lat.values
weights_model = np.cos(np.deg2rad(lat_model))
@@ -147,7 +147,7 @@
xterms = ['', 'IRF', '']
ax2.set_xticks([r for r in range(len(xterms))], xterms)
plt.tight_layout()
-plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_CMIP6scatter.eps')
+plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_CMIP6scatter.eps')
plt.close()
if np.max(nc_sw_irf.lon.values) >= 300: # convert 0-360 lon to -180-180 lon for plotting
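The hunk above repeats the same `WORK_DIR` lookup for every flux-anomaly file; one possible consolidation, using only names taken from the calls in the diff (a sketch, not part of the patch):

```python
import os
import xarray as xr

# Component names copied from the open_dataset calls above.
workdir = os.environ["WORK_DIR"]
components = ["Planck", "LapseRate", "LW_WaterVapor", "SW_WaterVapor",
              "SfcAlbedo", "LW_Cloud", "SW_Cloud", "LW_IRF", "SW_IRF",
              "LW_Rad", "SW_Rad", "StratFB"]
datasets = {c: xr.open_dataset(f"{workdir}/model/netCDF/fluxanom2D_{c}.nc")
            for c in components}
```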
diff --git a/diagnostics/forcing_feedback/forcing_feedback_util.py b/diagnostics/forcing_feedback/forcing_feedback_util.py
index 6396bb862..22c38817d 100644
--- a/diagnostics/forcing_feedback/forcing_feedback_util.py
+++ b/diagnostics/forcing_feedback/forcing_feedback_util.py
@@ -381,7 +381,7 @@ def fluxanom_nc_create(variable, lat, lon, fbname):
"""
var = xr.DataArray(variable, coords=[lat, lon], dims=['lat', 'lon'], name=fbname)
- var.to_netcdf(os.environ['WK_DIR'] + '/model/netCDF/fluxanom2D_' + fbname + '.nc')
+ var.to_netcdf(os.environ['WORK_DIR'] + '/model/netCDF/fluxanom2D_' + fbname + '.nc')
return None
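To make the changed write path concrete, a hypothetical call to `fluxanom_nc_create` with assumed grid shapes:

```python
import numpy as np

# Illustrative grid; real shapes come from the model data being processed.
lat = np.linspace(-90.0, 90.0, 73)
lon = np.linspace(0.0, 357.5, 144)
field = np.zeros((lat.size, lon.size))

# Writes $WORK_DIR/model/netCDF/fluxanom2D_Planck.nc via DataArray.to_netcdf.
fluxanom_nc_create(field, lat, lon, "Planck")
```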
@@ -449,7 +449,7 @@ def bargraph_plotting(model_bar, obs_bar, var_units, var_legnames, var_filename)
plt.ylabel(var_units)
plt.xticks([r + barWidth for r in range(len(model_bar))], var_legnames)
plt.legend(loc="upper right")
- plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_globemean_' + var_filename + '.eps')
+ plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_globemean_' + var_filename + '.eps')
plt.close()
return None
@@ -540,7 +540,7 @@ def map_plotting_4subs(cbar_levs1, cbar_levs2, var1_name, var1_model, \
if np.all(cbar_levs1 == cbar_levs2):
cbar = plt.colorbar(cs, ax=axs.ravel(), orientation='horizontal', aspect=25)
cbar.set_label(var_units)
- plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_maps_' + \
+ plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_maps_' + \
var_filename + '.eps', bbox_inches='tight')
plt.close()
@@ -593,7 +593,7 @@ def map_plotting_2subs(cbar_levs, var_name, var_model,
cbar = plt.colorbar(cs, ax=axs.ravel(), orientation='horizontal', aspect=25)
cbar.set_label(var_units)
- plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_maps_' + \
+ plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_maps_' + \
var_filename + '.eps', bbox_inches='tight')
plt.close()
diff --git a/diagnostics/mixed_layer_depth/mixed_layer_depth.html b/diagnostics/mixed_layer_depth/mixed_layer_depth.html
index be95dcb97..ea0c97376 100644
--- a/diagnostics/mixed_layer_depth/mixed_layer_depth.html
+++ b/diagnostics/mixed_layer_depth/mixed_layer_depth.html
@@ -2,31 +2,33 @@
This file is the HTML output template for the example diagnostic and shows
how you should describe and link to the plots your diagnostic generates.
The framework will "fill in the blanks" corresponding to environment
variables and copy the file to the working directory. -->
Mixed Layer Depth
-This POD computes mixed layer depth from CMIP6 monthly temperature and salinity. Mixed layer depth computed from the EN4 reanalysis temperature and salinity is included to compare with models.
+This POD computes mixed layer depth from CMIP6 monthly temperature and salinity.
+ Mixed layer depth computed from the EN4 reanalysis temperature and salinity is included to compare with models.
+
-These figures show the mixed layer depth climatology for each month. Note that the colorbar varies between subplots. Users may wish to modify this.
+These figures show the mixed layer depth climatology for each month. Note that the colorbar varies between subplots.
+ Users may wish to modify this.
+
The results from this POD are to appear in a paper being prepared for the MAPP team special issue.
Example diagnostic: ocean surface flux diagnostic
Influence of vertically propagating waves on the extratropical stratosphere
Model
@@ -49,7 +49,7 @@ {{CASENAME}}
-Eddy Heat Flux vs Polar Cap Height Lag Correlations
+<font color=navy>Eddy Heat Flux vs Polar Cap Height Lag Correlations