diff --git a/__init__.py b/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/data/fieldlist_NCAR.jsonc b/data/fieldlist_CESM.jsonc similarity index 92% rename from data/fieldlist_NCAR.jsonc rename to data/fieldlist_CESM.jsonc index a51014815..fae2c8ed6 100644 --- a/data/fieldlist_NCAR.jsonc +++ b/data/fieldlist_CESM.jsonc @@ -5,7 +5,7 @@ // Source: https://www.cesm.ucar.edu/models/cesm2/atmosphere/docs/ug6/hist_flds_f2000.html // CF variables not on that list are commented out { - "name" : "NCAR", + "name" : "CESM", "models": ["CAM4", "CESM2", "CESM"], // others? "coords" : { // only used for taking slices, unit conversion @@ -13,6 +13,7 @@ "lat": {"axis": "Y", "standard_name": "latitude", "units": "degrees_north"}, "TLONG": {"axis": "X", "standard_name": "array of t-grid longitudes", "units": "degrees_east"}, "TLAT": {"axis": "Y", "standard_name": "array of t-grid latitudes", "units": "degrees_north"}, + "plev": { "standard_name": "air_pressure", "units": "hPa", @@ -48,12 +49,14 @@ "variables" : { "U": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "U{value}"}, "ndim": 4 }, "V": { "standard_name": "northward_wind", + "realm":"atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "V{value}"}, "ndim": 4 @@ -61,67 +64,79 @@ "Z3": { "standard_name": "geopotential_height", "units": "m", + "realm": "atmos", // note: 4d name is 'Z3' but Z500 = height at 500 mb, etc. "scalar_coord_templates": {"plev": "Z{value}"}, "ndim": 4 }, "Z500": { "standard_name": "geopotential_height_500mb", + "realm": "atmos", "units": "m", // note: 4d name is 'Z3' but Z500 = height at 500 mb, etc. "ndim": 3 }, "Q": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "OMEGA": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "scalar_coord_templates": {"plev": "OMEGA{value}"}, "ndim": 4 }, "TS": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "PS": { "standard_name": "surface_air_pressure", + "realm": "atmos", // note: not hPa "units": "Pa", "ndim": 3 }, "PRECT": { "standard_name": "precipitation_rate", + "realm": "atmos", "units": "m s-1", "ndim": 3 }, "PRECC": { "standard_name": "convective_precipitation_rate", + "realm": "atmos", "units": "m s-1", "ndim": 3 }, "TREFHT" : { // correct name? CMIP6 equivalent should be tas, temp at 2m ref height "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "TAUX": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "TAUY": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "PSL": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", "ndim": 3 }, @@ -149,21 +164,25 @@ "FLUS": { // correct name? Guessed according to pattern -- no FLUS, only the net combination? 
"standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLDS": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLUT": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "FLNT": { "standard_name": "net_upward_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -229,32 +248,37 @@ }, "SHFLX": { "standard_name": "surface_upward_sensible_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "LHFLX": { "standard_name": "surface_upward_latent_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "zos": { "standard_name": "sea_surface_height_above_geoid", + "realm": "ocean", "units": "m", "ndim": 3 }, "tauuo": { "standard_name": "surface_downward_x_stress", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "tauvo": { "standard_name": "surface_downward_y_stress", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "areacello": { "standard_name": "cell_area", - "modifier": "ocean_realm", + "realm": "ocean", "units": "m2", "ndim": 2 }, @@ -281,11 +305,13 @@ // }, "THETAL": { "standard_name": "sea_water_potential_temperature", + "realm": "ocean", "units": "K", "ndim": 4 }, "SST": { "standard_name": "Potential Temperature", + "realm": "ocean", "units": "degC", "ndim": 4 }, @@ -293,6 +319,7 @@ // ta: 3D temperature, units = K: "T": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 4 }, @@ -300,6 +327,7 @@ "prw": { // check name: is this column integrated? "standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 } diff --git a/data/fieldlist_CMIP.jsonc b/data/fieldlist_CMIP.jsonc index 2214efb2b..a38ac77f2 100644 --- a/data/fieldlist_CMIP.jsonc +++ b/data/fieldlist_CMIP.jsonc @@ -3,33 +3,17 @@ // in strings, so you'll want to turn word wrap on in your editor. 
// { - "name" : "CMIP", + "name": "CMIP", "models": ["CMIP_GFDL"], - "coords" : { - // only used for taking slices, unit conversion - "lon": {"axis": "X", "standard_name": "longitude", "units": "degrees_east"}, - "lat": {"axis": "Y", "standard_name": "latitude", "units": "degrees_north"}, - "time": {"axis": "T", "standard_name": "time", "units": "days"}, - // eventually want contents of CMIP6.coordinate.json + "coords": { "plev": { "standard_name": "air_pressure", "units": "hPa", "positive": "down", "axis": "Z" }, - "standard_hybrid_sigma": { - "standard_name": "atmosphere_hybrid_sigma_pressure_coordinate", - "units": "1", - "axis": "Z", - "positive": "down" - }, - "lev": { - "standard_name": "depth", - "units": "m", - "positive": "down", - "axis": "Z" - } - }, + "$ref": "./cmip6-cmor-tables/Tables/CMIP6_coordinate.json" + }, "aux_coords": { "deptho": { "standard_name": "sea_floor_depth_below_geoid", @@ -45,6 +29,7 @@ "variables" : { "ua": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "ua{value}"}, "ndim": 4 @@ -57,99 +42,115 @@ }, "va": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "va{value}"}, "ndim": 4 }, "wind_speed": { "standard_name": "wind_speed", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "wind_speed{value}"}, "ndim": 4 }, "zg": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "scalar_coord_templates": {"plev": "zg{value}"}, "ndim": 4 }, "hus": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "wap": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "scalar_coord_templates": {"plev": "wap{value}"}, "ndim": 4 }, "o3": { "standard_name": "mole_fraction_of_ozone_in_air", + "realm": "atmos", "units": "mol mol-1", "ndim": 4 }, "ts": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "huss": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 3, "modifier": "atmos_height" }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "prc": { "standard_name": "convective_precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "tp": { "standard_name": "total_precipitation", + "realm": "atmos", "units": "kg m-2", "ndim": 3 }, "tas" : { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "tauu": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "tauv": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "areacello": { "standard_name": "cell_area", - "modifier" : "ocean_realm", + "realm": "ocean", "units": "m2", "ndim": 2 }, "areacella": { "standard_name": "cell_area", + "realm": "atmos", "units": "m2", - "modifier" : "atmos_realm", "ndim": 2 }, "ps": { "standard_name": "surface_air_pressure", + "realm": "atmos", "units": "Pa", "ndim": 3 }, "psl": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", "ndim": 3 }, @@ -160,6 +161,7 @@ }, "sfcWind": { "standard_name": "wind_speed", + "realm": "atmos", "units": "m s-1", "modifier": "atmos_height", "ndim": 3 @@ -172,31 +174,37 @@ // radiative fluxes: "rsus": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", 
"ndim": 3 }, "rsds": { "standard_name": "surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsdt": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsut": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlus": { "standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlds": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -218,67 +226,80 @@ }, "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsdscs": { "standard_name": "surface_downwelling_shortwave_flux_in_air_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsuscs": { "standard_name": "surface_upwelling_shortwave_flux_in_air_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rlutcs": { "standard_name": "toa_outgoing_longwave_flux_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "rsutcs": { "standard_name": "toa_outgoing_shortwave_flux_assuming_clear_sky", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "hfss": { "standard_name": "surface_upward_sensible_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, "hfls": { "standard_name": "surface_upward_latent_heat_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, // Variables for AMOC_3D_Structure module: "uo": { "standard_name": "sea_water_x_velocity", + "realm": "ocean", "units": "m s-1", "ndim": 4 }, "vo": { "standard_name": "sea_water_y_velocity", + "realm": "ocean", "units": "m s-1", "ndim": 4 }, "so": { "standard_name": "sea_water_salinity", + "realm": "ocean", "units": "psu", "ndim": 4 }, "umo": { "standard_name": "ocean_mass_x_transport", + "realm": "ocean", "units": "kg s-1", "ndim": 4 }, "vmo": { "standard_name": "ocean_mass_y_transport", + "realm": "ocean", "units": "kg s-1", "ndim": 4 }, "thetao": { "standard_name": "sea_water_potential_temperature", + "realm": "ocean", "units": "degC", "ndim": 4 }, @@ -286,6 +307,7 @@ // ta: 3D temperature, units = K: "ta": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "scalar_coord_templates" : {"plev": "ta{value}"}, "ndim": 4 @@ -293,38 +315,45 @@ // prw: Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) "prw": { "standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 }, // Variables for SM_ET_coupling module "mrsos": { "standard_name": "mass_content_of_water_in_soil_layer", + "realm": "land", "units": "kg m-2", "ndim": 3 }, "evspsbl": { "standard_name": "water_evapotranspiration_flux", + "realm": "land", "units": "kg m-2 s-1", "ndim": 3 }, // Ice-Ocean variables "siconc": { "standard_name": "sea_ice_area_fraction", + "realm": "seaIce", "units": "%", "ndim": 3 }, "tauuo": { "standard_name": "downward_x_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "tauvo": { "standard_name": "downward_y_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "ndim": 3 }, "zos": { "standard_name": "sea_surface_height_above_geoid", + "realm": "ocean", "units": "m", "ndim": 3 } diff --git a/data/fieldlist_GFDL.jsonc b/data/fieldlist_GFDL.jsonc index e13a994d2..980512bc5 100644 --- a/data/fieldlist_GFDL.jsonc +++ b/data/fieldlist_GFDL.jsonc @@ -47,29 +47,34 @@ "variables" : { "ucomp": { "standard_name": 
"eastward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "u{value}"}, "ndim": 4 }, "vcomp": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "scalar_coord_templates": {"plev": "v{value}"}, "ndim": 4 }, "hght": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "scalar_coord_templates": {"plev": "hght{value}"}, "ndim": 4 }, "sphum": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "ndim": 4 }, "omega": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", // need to verify "scalar_coord_templates": {"plev": "omega{value}"}, "ndim": 4 @@ -77,79 +82,94 @@ "t_surf": { // "skin temperature", analogue of ts "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "ndim": 3 }, "precip": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "ndim": 3 }, "prec_conv": { "standard_name": "convective_precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", // need to verify "ndim": 3 }, "t_ref" : { // CMIP6 equivalent = tas, temp at 2m ref height "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 3, "modifier": "atmos_height" }, "ps": { "standard_name": "surface_air_pressure", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "tau_x": { "standard_name": "surface_downward_eastward_stress", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "tau_y": { "standard_name": "surface_downward_northward_stress", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, "slp": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", // need to verify "ndim": 3 }, // radiative fluxes: "swup_sfc": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swdn_sfc": { "standard_name": "surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swdn_toa": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "swup_toa": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "lwup_sfc": { "standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "lwdn_sfc": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", // need to verify "ndim": 3 }, "olr": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "ndim": 3 }, @@ -256,6 +276,7 @@ // }, "salt": { "standard_name": "sea_water_salinity", + "realm": "ocean", "units": "psu", "ndim": 4 }, @@ -275,12 +296,14 @@ // Variables for Convective Transition Diagnostics module: "temp": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "ndim": 4 }, "WVP": { // column integral; over the whole column? 
"standard_name": "atmosphere_mass_content_of_water_vapor", + "realm": "atmos", "units": "kg m-2", "ndim": 3 } diff --git a/data/modifiers.jsonc b/data/modifiers.jsonc index 51434c9a0..c65add153 100644 --- a/data/modifiers.jsonc +++ b/data/modifiers.jsonc @@ -5,11 +5,5 @@ { "atmos_height" : { "description" : "atmospheric height above the ground (relative to orography) in meters" - }, - "atmos_realm" : { - "description" : "designator for a variable belonging to the atmosphere realm that shares a standard_name with a variable in a different realm" - }, - "ocean_realm" : { - "description" : "designator for a variable belonging to the ocean realm that shares a standard_name with a variable in a different realm" } } diff --git a/diagnostics/ENSO_MSE/ENSO_MSE.html b/diagnostics/ENSO_MSE/ENSO_MSE.html index 796b05a29..66f6c8c6b 100644 --- a/diagnostics/ENSO_MSE/ENSO_MSE.html +++ b/diagnostics/ENSO_MSE/ENSO_MSE.html @@ -6,7 +6,6 @@
[Body of the two ENSO_MSE.html hunks, garbled in extraction. Recoverable content: the first hunk (@@ -6,7 +6,6 @@) drops a stray blank line above the overview text ("This POD package consists of four levels. With a focus on identifying leading processes that determine ENSO related precipitation anomalies, the main module of the POD estimates..."); the second hunk (@@ -21,9 +20,10 @@) re-wraps the "Documentation and Contact Information" heading "ENSO moist static energy variability diagnostics" and the links to the composite, moist static energy budget, moist static energy variance, and moist static energy scatter plots in what appear to be <font color=navy> tags. The link text itself is unchanged.]
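For context on the fieldlist changes earlier in this patch, which drop the atmos_realm/ocean_realm modifiers in favor of a per-variable "realm" attribute: below is a minimal sketch of how two entries sharing a standard_name (areacella and areacello are both "cell_area") can now be disambiguated. The lookup helper is hypothetical, not the framework's actual translation code; only the two dictionary entries mirror the fieldlist hunks above.

# Hypothetical helper illustrating the new per-variable "realm" attribute.
fieldlist_vars = {
    "areacella": {"standard_name": "cell_area", "realm": "atmos", "units": "m2", "ndim": 2},
    "areacello": {"standard_name": "cell_area", "realm": "ocean", "units": "m2", "ndim": 2},
}

def lookup_native_name(standard_name, realm):
    """Return the unique native variable name matching both standard_name and realm."""
    matches = [name for name, attrs in fieldlist_vars.items()
               if attrs["standard_name"] == standard_name and attrs.get("realm") == realm]
    if len(matches) != 1:
        raise KeyError("no unique match for %r in realm %r" % (standard_name, realm))
    return matches[0]

print(lookup_native_name("cell_area", "ocean"))  # -> areacello

With the old scheme the same disambiguation had to be encoded as a "modifier" on one of the entries; attaching the realm to every variable makes the lookup symmetric across realms.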
+ diff --git a/diagnostics/ENSO_MSE/ENSO_MSE.py b/diagnostics/ENSO_MSE/ENSO_MSE.py index 19d2101ce..27581eb34 100644 --- a/diagnostics/ENSO_MSE/ENSO_MSE.py +++ b/diagnostics/ENSO_MSE/ENSO_MSE.py @@ -23,7 +23,7 @@ now = datetime.datetime.now() print( "STARTING ENSO_MSE.py on:" + now.strftime("%Y-%m-%d %H:%M")) -os.environ["ENSO_MSE_WKDIR"] = os.environ["WK_DIR"] +os.environ["ENSO_MSE_WKDIR"] = os.environ["WORK_DIR"] # TODO remove the ENSO module environment switch definitions after framework # pod_env_vars issue is fixed @@ -40,12 +40,12 @@ # Subpackage control variables optionally set in namelist eg. VAR ENSO_COMPOSITE 1 # nb. OBS isn't really a subpackage but is a switch used by all subpackages -subpackages = ["OBS","COMPOSITE","MSE","MSE_VAR","SCATTER"] -subpack_default = "1" #Run all subpackage unless envvars are set not to +subpackages = ["OBS", "COMPOSITE", "MSE", "MSE_VAR", "SCATTER"] +subpack_default = "1" # Run all subpackage unless envvars are set not to for subpack in subpackages: - os.environ["ENSO_"+subpack] = os.environ.get("ENSO_"+subpack,subpack_default) - os.environ["ENSO_MSE_WKDIR_"+subpack] = os.environ["ENSO_MSE_WKDIR"]+"/"+subpack + os.environ["ENSO_" + subpack] = os.environ.get("ENSO_" + subpack,subpack_default) + os.environ["ENSO_MSE_WKDIR_"+subpack] = os.environ["ENSO_MSE_WKDIR"] + "/"+subpack print(subpack, os.environ["ENSO_"+subpack]) if os.environ["ENSO_"+subpack] == "1": print(" ENSO_MSE subpackage ENSO_"+subpack+" active, output will be in " + os.environ["ENSO_MSE_WKDIR_"+subpack]) @@ -53,14 +53,14 @@ print(" ENSO_MSE subpackage ENSO_"+subpack+" off. Turn on by adding line to namelist input: VAR ENSO_"+subpack+" 1 ") -#DRB: unfortunately these don't get copied to namelist_save, which means -#debugging requires starting from this script. To add them here requires -#adding the POD_HOME/util path (easy, see mdtf.py) and getting the envvars -#dict here, but currently the file is written before the pods are called. +# DRB: unfortunately these don't get copied to namelist_save, which means +# debugging requires starting from this script. To add them here requires +# adding the POD_HOME/util path (easy, see mdtf.py) and getting the envvars +# dict here, but currently the file is written before the pods are called. # ================================================================================================== -#### 1. COMPOSITE +# 1. 
COMPOSITE if os.environ["ENSO_COMPOSITE"] == "1": try: print("=================================================================") @@ -96,13 +96,12 @@ print(" RLUT - TOA outgoing LW ") print("=================================================================") - print("=================================================================") print(" More detailed information regarding the COMPOSITE module is in ") print(" README_LEVEL_01.docx/README_LEVEL_01.pdf files under ~/diagnostics/ENSO_MSE/COMPOSITE/") print("=================================================================") -### set if to run Observational Preprocessing : +# set if to run Observational Preprocessing: if os.environ["ENSO_OBS"] == "1": print("=================================================================") print(" Starting Observational COMPOSITE module ") @@ -115,34 +114,34 @@ print(" Finished Observational COMPOSITE module ") print("=================================================================") -### check for model input dat +# check for model input dat os.system("python "+os.environ["POD_HOME"]+"/COMPOSITE/check_input_files.py") os.system("python "+os.environ["POD_HOME"]+"/COMPOSITE/get_directories.py") os.system("python "+os.environ["POD_HOME"]+"/COMPOSITE/COMPOSITE.py") -### copy the banner file : mdtf_diag_banner.png to "ENSO_MSE_WKDIR" needed by -### individual component html files - file_src = os.environ["POD_HOME"]+"/mdtf_diag_banner.png" +# copy the banner file : mdtf_diag_banner.png to "ENSO_MSE_WKDIR" needed by +# individual component html files + file_src = os.environ["POD_HOME"]+"/mdtf_diag_banner.png" file_dest = os.environ["ENSO_MSE_WKDIR"]+"/mdtf_diag_banner.png" - if os.path.isfile( file_dest ): + if os.path.isfile(file_dest): os.system("rm -f "+file_dest) os.system("cp "+file_src+" "+file_dest) - file_src = os.environ["POD_HOME"]+"/ENSO_MSE.html" + file_src = os.environ["POD_HOME"]+"/ENSO_MSE.html" file_dest = os.environ["ENSO_MSE_WKDIR"]+"/ENSO_MSE.html" - if os.path.isfile( file_dest ): + if os.path.isfile( file_dest): os.system("rm -f "+file_dest) os.system("cp "+file_src+" "+file_dest) - file_src = os.environ["POD_HOME"]+"/doc/ENSO_MSE.pdf" + file_src = os.environ["POD_HOME"]+"/doc/ENSO_MSE.pdf" file_dest = os.environ["ENSO_MSE_WKDIR"]+"/ENSO_MSE.pdf" - if os.path.isfile( file_dest ): - os.system("rm -f "+file_dest) + if os.path.isfile(file_dest): + os.system("rm -f " + file_dest) os.system("cp "+file_src+" "+file_dest) print("=================================================================") print(" COMPOSITES FINISHED ") print("=================================================================") except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) print("COMPOSITE is NOT Executed as Expected!") @@ -179,7 +178,7 @@ print("=================================================================") except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) print("MSE is NOT Executed as Expected!") # 3. MSE variances @@ -213,7 +212,7 @@ print("=================================================================") except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) print("MSE VARIANCE is NOT Executed as Expected!") ##### # 4. 
CMIP5 scatter plots @@ -240,9 +239,9 @@ print("=================================================================") except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) print("MSE VARIANCE is NOT Executed as Expected!") now = datetime.datetime.now() -print( "FINISHED ENSO_MSE.py on:" + now.strftime("%Y-%m-%d %H:%M")) +print("FINISHED ENSO_MSE.py on:" + now.strftime("%Y-%m-%d %H:%M")) # ====================================================================== diff --git a/diagnostics/ENSO_MSE/MSE/MSE.py b/diagnostics/ENSO_MSE/MSE/MSE.py index 0275eb331..4f1f6ee91 100644 --- a/diagnostics/ENSO_MSE/MSE/MSE.py +++ b/diagnostics/ENSO_MSE/MSE/MSE.py @@ -14,15 +14,13 @@ # programming : Jan Hafner, jhafner@hawaii.edu # # This package is distributed under the LGPLv3 license (see LICENSE.txt) -### -### the OBServational routine just reads and plots -### pre-digested Observational Data -## +# +# the OBServational routine just reads and plots +# pre-digested Observational Data +# import sys - import datetime - import os shared_dir = os.path.join( @@ -79,37 +77,35 @@ ''' -### declaration and set up of relavant directories +# declaration and set up of relavant directories outdir = os.environ["ENSO_MSE_WKDIR_MSE"] + "/model" -## base path of all input files (created by COMPOSITE package) +# base path of all input files (created by COMPOSITE package) now = datetime.datetime.now() print("===============================================================") -print(" Start of Observational Moist Static Energy Module " + now.strftime("%Y-%m-%d %H:%M")) +print(" Start of Observational Moist Static Energy Module " + now.strftime("%Y-%m-%d %H:%M")) print("===============================================================") -print( " ") +print(" ") -### data routine for all El Nino/La Nina cases -generate_ncl_call(os.environ["POD_HOME"]+ "/MSE/NCL_DATA/get_MSE_data.ncl") +# data routine for all El Nino/La Nina cases +generate_ncl_call(os.environ["POD_HOME"] + "/MSE/NCL_DATA/get_MSE_data.ncl") -### plotting routine for all El Nino/La Nina cases -generate_ncl_call(os.environ["POD_HOME"]+ "/MSE/NCL/plot_composite_all.ncl") +# plotting routine for all El Nino/La Nina cases +generate_ncl_call(os.environ["POD_HOME"] + "/MSE/NCL/plot_composite_all.ncl") -file_src = os.environ["POD_HOME"]+"/MSE/MSE.html" +file_src = os.environ["POD_HOME"] + "/MSE/MSE.html" file_dest = os.environ["ENSO_MSE_WKDIR"]+"/MSE.html" -if os.path.isfile( file_dest ): - os.system("rm -f "+file_dest) -os.system("cp "+file_src+" "+file_dest) - - +if os.path.isfile(file_dest): + os.system("rm -f " + file_dest) +os.system("cp " + file_src + " " + file_dest) + now = datetime.datetime.now() -print (" Seasonal Model ENSO MSE composites completed " + now.strftime("%Y-%m-%d %H:%M") ) -print (" plots of ENSO seasonal MSE anomalies finished ") -print (" resulting plots are located in : " + outdir ) -print (" with prefix composite + ELNINO/LANINA + variable name ") -print (" " ) -#============================================================ -############ end +print(" Seasonal Model ENSO MSE composites completed " + now.strftime("%Y-%m-%d %H:%M")) +print(" plots of ENSO seasonal MSE anomalies finished ") +print(" resulting plots are located in : " + outdir) +print(" with prefix composite + ELNINO/LANINA + variable name ") +print(" ") +# ============================================================ diff --git a/diagnostics/ENSO_MSE/MSE/MSE_OBS.py b/diagnostics/ENSO_MSE/MSE/MSE_OBS.py index 671ea3547..b9b7adb63 100644 --- 
a/diagnostics/ENSO_MSE/MSE/MSE_OBS.py +++ b/diagnostics/ENSO_MSE/MSE/MSE_OBS.py @@ -14,10 +14,9 @@ # programming : Jan Hafner, jhafner@hawaii.edu # # This package is distributed under the LGPLv3 license (see LICENSE.txt) -### -### the OBServational routine just reads and plots -### pre-digested Observational Data -## +# +# the OBServational routine just reads and plots pre-digested Observational Data +# import sys @@ -79,27 +78,26 @@ ''' -### declaration and set up of relavant directories +# declaration and set up of relavant directories outdir = os.environ["ENSO_MSE_WKDIR_MSE"] + "/obs" -## base path of all input files (created by COMPOSITE package) +# base path of all input files (created by COMPOSITE package) now = datetime.datetime.now() print("===============================================================") -print(" Start of Observational Moist Static Energy Module " + now.strftime("%Y-%m-%d %H:%M")) +print(" Start of Observational Moist Static Energy Module " + now.strftime("%Y-%m-%d %H:%M")) print("===============================================================") -print( " ") +print(" ") -### plotting routine for all El Nino/La Nina cases -generate_ncl_call(os.environ["POD_HOME"]+ "/MSE/NCL/plot_composite_all_OBS.ncl") +# plotting routine for all El Nino/La Nina cases +generate_ncl_call(os.environ["POD_HOME"] + "/MSE/NCL/plot_composite_all_OBS.ncl") now = datetime.datetime.now() -print (" Seasonal Observational ENSO MSE composites completed " + now.strftime("%Y-%m-%d %H:%M") ) -print (" plots of ENSO seasonal MSE anomalies finished ") -print (" resulting plots are located in : " + outdir ) -print (" with prefix composite + ELNINO/LANINA + variable name ") -print (" " ) -#============================================================ -############ end +print(" Seasonal Observational ENSO MSE composites completed " + now.strftime("%Y-%m-%d %H:%M") ) +print(" plots of ENSO seasonal MSE anomalies finished ") +print(" resulting plots are located in : " + outdir) +print(" with prefix composite + ELNINO/LANINA + variable name ") +print(" ") +# ============================================================ diff --git a/diagnostics/ENSO_MSE/MSE/NCL_DATA/get_MSE_data.ncl b/diagnostics/ENSO_MSE/MSE/NCL_DATA/get_MSE_data.ncl index a46957cc9..c20005394 100644 --- a/diagnostics/ENSO_MSE/MSE/NCL_DATA/get_MSE_data.ncl +++ b/diagnostics/ENSO_MSE/MSE/NCL_DATA/get_MSE_data.ncl @@ -15,8 +15,8 @@ begin outdir2 = getenv("ENSO_MSE_WKDIR_MSE") + "/model/" ; envvar set in ENSO_MSE.py case = getenv( "CASENAME") - iy1 = toint( ( getenv("FIRSTYR")) ) - iy2 = toint( ( getenv("LASTYR")) ) + iy1 = toint( ( getenv("startdate")) ) + iy2 = toint( ( getenv("enddate")) ) nameout = outdir2 + "netCDF/" ;;; input variables diff --git a/diagnostics/ENSO_MSE/MSE/check_input_files.py b/diagnostics/ENSO_MSE/MSE/check_input_files.py index 29184ca3e..3840a19f7 100644 --- a/diagnostics/ENSO_MSE/MSE/check_input_files.py +++ b/diagnostics/ENSO_MSE/MSE/check_input_files.py @@ -1,8 +1,6 @@ import os import sys -### import xarray as xr - shared_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), @@ -10,45 +8,39 @@ ) sys.path.insert(0, shared_dir) -### -### check the input data in COMPOSITE/model directories -## 3D +# +# check the input data in COMPOSITE/model directories +# 3D size = 12 -vvar = [ "zg", "ua", "va", "ta", "hus", "wap", "pr", "ts", "hfls", "hfss", "lw", "sw" ] -## +vvar = ["zg", "ua", "va", "ta", "hus", "wap", "pr", "ts", "hfls", "hfss", "lw", "sw"] -mode = [ "ELNINO", "LANINA" ] +mode = 
["ELNINO", "LANINA"] -## check for missing files +# check for missing files for iv in range(0, size): - for n in range(0, 2): - filevar = os.environ["WK_DIR"] + "/COMPOSITE/model/netCDF/" + mode[n] + "/" + vvar[iv] + ".nc" + filevar = os.environ["WORK_DIR"] + "/COMPOSITE/model/netCDF/" + mode[n] + "/" + vvar[iv] + ".nc" if not os.path.exists(filevar): - print ("=============================================") - print ("=== MISSING INPUT FILE " + filevar ) - print ("==== EXITING =================== ") - ##raw_input("Press any key to continue") + print("=============================================") + print("=== MISSING INPUT FILE " + filevar ) + print("==== EXITING =================== ") + # raw_input("Press any key to continue") sys.exit() - - filevar = os.environ["WK_DIR"] + "/COMPOSITE/model/netCDF/" + vvar[iv] + "_clim.nc" + filevar = os.environ["WORK_DIR"] + "/COMPOSITE/model/netCDF/" + vvar[iv] + "_clim.nc" if not os.path.exists(filevar): - print ("=============================================") - print ("=== MISSING INPUT FILE " + filevar ) - print ("==== EXITING =================== ") - raw_input("Press any key to continue") + print("=============================================") + print("=== MISSING INPUT FILE " + filevar) + print("==== EXITING =================== ") sys.exit() -print (" =========================================================") -print (" ==========================================================") -print ("=========== All model input files found =============== ") -print ( " =========================================================") -print (" ==========================================================") - -#### +print(" =========================================================") +print(" ==========================================================") +print("=========== All model input files found =============== ") +print(" =========================================================") +print(" ==========================================================") diff --git a/diagnostics/ENSO_MSE/MSE/check_input_files_OBS.py b/diagnostics/ENSO_MSE/MSE/check_input_files_OBS.py index da6f64408..c4880fce2 100644 --- a/diagnostics/ENSO_MSE/MSE/check_input_files_OBS.py +++ b/diagnostics/ENSO_MSE/MSE/check_input_files_OBS.py @@ -1,53 +1,48 @@ import os import sys - shared_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'shared' ) sys.path.insert(0, shared_dir) -### -### check the input data in inputdata/obs_data directories DATADIR -#### pre-digested data -## 3D +# +# check the input data in inputdata/obs_data directories DATADIR pre-digested data +# 3D size = 5 -vvar = [ "madv", "mdiv", "mse", "omse", "tadv" ] +vvar = ["madv", "mdiv", "mse", "omse", "tadv"] -## check for missing files -## +# check for missing files -mode = [ "ELNINO", "LANINA" ] +mode = ["ELNINO", "LANINA"] for n in range(0, 2): for iv in range(0, size): - filevar = os.environ["OBS_DATA"] + "/DATA/netCDF/" + mode[n] + "/MSE_" + vvar[iv] + ".nc" + filevar = os.environ["OBS_DATA"] + "/DATA/netCDF/" + mode[n] + "/MSE_" + vvar[iv] + ".nc" if not os.path.exists(filevar): - print ("=============================================") - print ("=== MISSING PRE-DIGESTED OBSERVATIONAL DATA FILE " + filevar ) - print ("==== EXITING =================== ") + print("=============================================") + print("=== MISSING PRE-DIGESTED OBSERVATIONAL DATA FILE " + filevar) + print("==== EXITING =================== ") sys.exit() else: print ("L49 Found "+filevar) - - filevar = 
os.environ["OBS_DATA"] + "/DATA/netCDF/MSE_" + vvar[iv] + "_clim.nc" + filevar = os.environ["OBS_DATA"] + "/DATA/netCDF/MSE_" + vvar[iv] + "_clim.nc" if not os.path.exists(filevar): - print ("=============================================") - print ("=== MISSING PRE-DIGESTED OBSERVATIONAL DATA FILE " + filevar ) - print ("==== EXITING =================== ") + print("=============================================") + print("=== MISSING PRE-DIGESTED OBSERVATIONAL DATA FILE " + filevar) + print("==== EXITING =================== ") sys.exit() else: - print ("L49 Found "+filevar) - -print (" =========================================================") -print (" ==========================================================") -print (" ==== All Pre-digested Observational files found ======== ") -print (" =========================================================") -print (" ==========================================================") -print (" ==========================================================") -#### + print("L49 Found " + filevar) + +print(" =========================================================") +print(" ==========================================================") +print(" ==== All Pre-digested Observational files found ======== ") +print(" =========================================================") +print(" ==========================================================") +print(" ==========================================================") diff --git a/diagnostics/ENSO_MSE/MSE/get_data_in.py b/diagnostics/ENSO_MSE/MSE/get_data_in.py index 29f89e4f7..e671eec93 100644 --- a/diagnostics/ENSO_MSE/MSE/get_data_in.py +++ b/diagnostics/ENSO_MSE/MSE/get_data_in.py @@ -2,72 +2,73 @@ import os.path import sys + def get_data_in(imax, jmax, zmax, hgt, uu, vv, temp, shum, vvel, prefix, undef): -## print prefix - if (os.path.exists(prefix+"/U.grd")): + # print prefix + if os.path.exists(prefix + "/U.grd"): f = open(prefix+'/U.grd', 'rb') aa1 = np.fromfile(f, dtype='float32') uu = np.reshape( aa1, (imax, jmax, zmax), order='F') uu = np.ma.masked_greater_equal( uu, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/U.grd") - print (" exiting get_data_in.py") + print(" missing file " + prefix + "/U.grd") + print(" exiting get_data_in.py") sys.exit() - if (os.path.exists(prefix+"/V.grd")): - f = open(prefix+'/V.grd', 'rb') + if os.path.exists(prefix + "/V.grd"): + f = open(prefix + '/V.grd', 'rb') aa1 = np.fromfile(f, dtype='float32') - vv = np.reshape( aa1, (imax, jmax, zmax), order='F') - vv = np.ma.masked_greater_equal( vv, undef, copy=False) + vv = np.reshape(aa1, (imax, jmax, zmax), order='F') + vv = np.ma.masked_greater_equal(vv, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/V.grd" ) - print (" exiting get_data_in.py") + print("missing file " + prefix + "/V.grd") + print("exiting get_data_in.py") sys.exit() - if (os.path.exists(prefix+"/T.grd")): - f = open(prefix+'/T.grd', 'rb') - aa1 = np.fromfile(f, dtype='float32') - temp = np.reshape( aa1, (imax, jmax, zmax), order='F') - temp = np.ma.masked_greater_equal( temp, undef, copy=False) + if os.path.exists(prefix + "/T.grd"): + f = open(prefix + '/T.grd', 'rb') + aa1 = np.fromfile(f, dtype='float32') + temp = np.reshape(aa1, (imax, jmax, zmax), order='F') + temp = np.ma.masked_greater_equal(temp, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/T.grd") - print (" exiting get_data_in.py") + print(" missing file " + prefix + "/T.grd") + print(" exiting get_data_in.py") sys.exit() - 
if (os.path.exists(prefix+"/Q.grd")): - f = open(prefix+'/Q.grd', 'rb') + if os.path.exists(prefix+"/Q.grd"): + f = open(prefix + '/Q.grd', 'rb') aa1 = np.fromfile(f, dtype='float32') shum = np.reshape( aa1, (imax, jmax, zmax), order='F') shum = np.ma.masked_greater_equal( shum, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/Q.grd") - print (" exiting get_data_in.py") + print(" missing file " + prefix + "/Q.grd") + print(" exiting get_data_in.py") sys.exit() - if (os.path.exists(prefix+"/Z.grd")): - f = open(prefix+'/Z.grd', 'rb') - aa1 = np.fromfile(f, dtype='float32') - hgt = np.reshape( aa1, (imax, jmax, zmax), order='F') - hgt = np.ma.masked_greater_equal( hgt, undef, copy=False) + if os.path.exists(prefix + "/Z.grd"): + f = open(prefix + '/Z.grd', 'rb') + aa1 = np.fromfile(f, dtype='float32') + hgt = np.reshape(aa1, (imax, jmax, zmax), order='F') + hgt = np.ma.masked_greater_equal(hgt, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/Z.grd") - print (" exiting get_data_in.py") + print(" missing file " + prefix + "/Z.grd") + print(" exiting get_data_in.py") sys.exit() - if (os.path.exists(prefix+"/OMG.grd")): - f = open(prefix+'/OMG.grd', 'rb') + if os.path.exists(prefix + "/OMG.grd"): + f = open(prefix + '/OMG.grd', 'rb') aa1 = np.fromfile(f, dtype='float32') - vvel = np.reshape( aa1, (imax, jmax, zmax), order='F') - vvel = np.ma.masked_greater_equal( vvel, undef, copy=False) + vvel = np.reshape(aa1, (imax, jmax, zmax), order='F') + vvel = np.ma.masked_greater_equal(vvel, undef, copy=False) f.close() else: - print (" missing file " + prefix + "/OMG.grd") - print (" exiting get_data_in.py") + print(" missing file " + prefix + "/OMG.grd") + print(" exiting get_data_in.py") sys.exit() return hgt, uu, vv, temp, shum, vvel diff --git a/diagnostics/ENSO_MSE/MSE/get_directories.py b/diagnostics/ENSO_MSE/MSE/get_directories.py index 35e260216..07b77d943 100644 --- a/diagnostics/ENSO_MSE/MSE/get_directories.py +++ b/diagnostics/ENSO_MSE/MSE/get_directories.py @@ -8,18 +8,13 @@ sys.path.insert(0, shared_dir) from util import check_required_dirs -### -### def get_directories(): +# def get_directories(): -modeldir = os.environ["ENSO_MSE_WKDIR_MSE"]+"/model" #wkdir, defined in ENSO_MSE.py - -dirs_to_create = [ modeldir+"/PS", - modeldir+"/netCDF/ELNINO" , - modeldir+"/netCDF/LANINA" ] - - -check_required_dirs( already_exist =[], create_if_nec = dirs_to_create, verbose=2) - -### DRB: sym link to obs no longer necessary because everything is written/read to/from WKDIR +modeldir = os.environ["ENSO_MSE_WKDIR_MSE"] + "/model" # wkdir, defined in ENSO_MSE.py +dirs_to_create = [modeldir+"/PS", + modeldir+"/netCDF/ELNINO", + modeldir+"/netCDF/LANINA" + ] +check_required_dirs( already_exist=[], create_if_nec=dirs_to_create, verbose=2) diff --git a/diagnostics/ENSO_MSE/MSE/get_directories_OBS.py b/diagnostics/ENSO_MSE/MSE/get_directories_OBS.py index 829ad39a8..edd91552e 100644 --- a/diagnostics/ENSO_MSE/MSE/get_directories_OBS.py +++ b/diagnostics/ENSO_MSE/MSE/get_directories_OBS.py @@ -8,16 +8,13 @@ sys.path.insert(0, shared_dir) from util import check_required_dirs -### -### def get_directories(): +# def get_directories(): +obsdir = os.environ["ENSO_MSE_WKDIR_MSE"] + "/obs" # wkdir, defined in ENSO_MSE.py -obsdir = os.environ["ENSO_MSE_WKDIR_MSE"]+"/obs" #wkdir, defined in ENSO_MSE.py +dirs_to_create = [obsdir+"/PS", + obsdir+"/netCDF/ELNINO", + obsdir+"/netCDF/LANINA" + ] -dirs_to_create = [ obsdir+"/PS", - obsdir+"/netCDF/ELNINO" , - 
obsdir+"/netCDF/LANINA" ] - -check_required_dirs( already_exist =[], create_if_nec = dirs_to_create, verbose=2) - -### DRB: sym link to obs no longer necessary because everything is written/read to/from WKDIR +check_required_dirs( already_exist=[], create_if_nec=dirs_to_create, verbose=2) diff --git a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite.ncl b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite.ncl index 1b03cb52d..d004b4b4c 100644 --- a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite.ncl +++ b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite.ncl @@ -20,11 +20,6 @@ begin model = getenv( "CASENAME") -; varcode = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/diagnostics/ENSO_MSE/" -; vardata = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v1/ENSO_MSE/MSE_VAR/" -; outdir = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v1/ENSO_MSE/" -; model = "CESM2" - Varname = "output" parameter_name = varcode + "/shared/parameters.txt" diff --git a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_OBS.ncl b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_OBS.ncl index f8a8342fa..1db9d3fb4 100644 --- a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_OBS.ncl +++ b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_OBS.ncl @@ -15,12 +15,7 @@ begin varcode = getenv("POD_HOME") vardata = getenv("ENSO_MSE_WKDIR_MSE_VAR") outdir = getenv("ENSO_MSE_WKDIR") - model = " " ; leave blank for OBS - -;; varcode = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/diagnostics/ENSO_MSE/" -;; vardata = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v0/ENSO_MSE/MSE_VAR/" -;; outdir = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v0/ENSO_MSE/" - + model = " " ; leave blank for OBS Varname = "output" diff --git a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general.ncl b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general.ncl index 6937eccfd..18a9aa13c 100644 --- a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general.ncl +++ b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general.ncl @@ -8,8 +8,6 @@ begin factor = 1./ff ;;; get in the general custom selectable domain lat/lons -;; composite_dir_data = getenv("ENSO_MSE_WKDIR_COMPOSITE")+"/model/netCDF/DATA/" - slon1 = tofloat( getenv("slon1")) slon2 = tofloat( getenv("slon2")) slat1 = tofloat( getenv("slat1")) @@ -21,10 +19,6 @@ begin model = getenv( "CASENAME") -;; varcode = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/diagnostics/ENSO_MSE/" -;; vardata = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v1/ENSO_MSE/MSE_VAR/" -;; outdir = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v1/ENSO_MSE/" -;; model = "CESM2" slon1 = 160. slon2 = 200. 
diff --git a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general_OBS.ncl b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general_OBS.ncl index a18296ae7..b12419402 100644 --- a/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general_OBS.ncl +++ b/diagnostics/ENSO_MSE/MSE_VAR/NCL/plot_bars_composite_general_OBS.ncl @@ -13,15 +13,7 @@ begin slat2 = tofloat( getenv("slat2")) varcode = getenv("POD_HOME") vardata = getenv("ENSO_MSE_WKDIR_MSE_VAR") - outdir = getenv("ENSO_MSE_WKDIR") - -; varcode = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/diagnostics/ENSO_MSE/" -; vardata = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v0/ENSO_MSE/MSE_VAR/" -; outdir = "/export/oat/hafner/gyoji7/VARIOUS/HANA/MSE_SRC/PYTHON/MDTF_2021-07-06/wkdir/MDTF_CESM2_1950_2005.v0/ENSO_MSE/" -; slon1 = 160. -; slon2 = 200. -; slat1 = -10. -; slat2 = 5. + outdir = getenv("ENSO_MSE_WKDIR") model = " " Varname = "output" diff --git a/diagnostics/ENSO_MSE/SCATTER/SCATTER.html b/diagnostics/ENSO_MSE/SCATTER/SCATTER.html index 2157cb7c2..9891037a0 100644 --- a/diagnostics/ENSO_MSE/SCATTER/SCATTER.html +++ b/diagnostics/ENSO_MSE/SCATTER/SCATTER.html @@ -27,56 +27,54 @@

[Body of the two SCATTER.html hunks (@@ -27,56 +27,54 @@ and @@ -84,48 +82,48 @@), garbled in extraction. Recoverable content: under the "ENSO Moist Static Energy Diagnostics" header and the "RESULTS: SCATTER PLOTS MSE TERMS versus PRECIPITATION" heading, the tables for the Central Pacific and Eastern Pacific domains are re-wrapped in what appear to be <font color="#000080"> tags. Each table lists "Precipitation versus" rows for Horizontal moist advection, Net Radiative Flux (Frad), Vertical Advection of Moist Static Energy, and Total heat flux THF, each linking to a plot; the link text itself is unchanged.]
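The get_MSE_data.ncl hunk earlier in this patch switches the composite year bounds from FIRSTYR/LASTYR to the framework's startdate/enddate environment variables. A minimal Python-side sketch of the same read follows; the example values are hypothetical, and in a real run the framework exports these variables before the POD starts.

import os

# Hypothetical values for illustration only.
os.environ.setdefault("startdate", "1950")
os.environ.setdefault("enddate", "2005")

# Python equivalent of the NCL calls iy1 = toint(getenv("startdate")), etc.
iy1 = int(os.environ["startdate"])
iy2 = int(os.environ["enddate"])
assert iy1 <= iy2, "start year must not exceed end year"
print("compositing years %d-%d" % (iy1, iy2))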
diff --git a/diagnostics/ENSO_MSE/SCATTER/SCATTER.py b/diagnostics/ENSO_MSE/SCATTER/SCATTER.py index abaca7ffa..608a0a3cc 100644 --- a/diagnostics/ENSO_MSE/SCATTER/SCATTER.py +++ b/diagnostics/ENSO_MSE/SCATTER/SCATTER.py @@ -14,14 +14,11 @@ # # This package is distributed under the LGPLv3 license (see LICENSE.txt) - import sys import time - import datetime import os - shared_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'shared' @@ -56,50 +53,47 @@ now = datetime.datetime.now() print("===============================================================") -print(" Start of Scatter Plot Module calculations " + now.strftime("%Y-%m-%d %H:%M")) +print(" Start of Scatter Plot Module calculations " + now.strftime("%Y-%m-%d %H:%M")) print("===============================================================") undef = float(1.1e+20) -### first prefix for data + other variables passed to the code -wkdir = os.environ["ENSO_MSE_WKDIR"] +# first prefix for data + other variables passed to the code +wkdir = os.environ["ENSO_MSE_WKDIR"] -#### creating data fro NEW MODEL to be plotted in SCATTER plots -### Central Pacific +# creating data fro NEW MODEL to be plotted in SCATTER plots +# Central Pacific clon1 = 160. clon2 = 200. clat1 = -10. clat2 = 5. -## and Eastern Pacific +# and Eastern Pacific elon1 = 220. elon2 = 280. elat1 = -5. elat2 = 5. -#### call the model routine to select NEW MODEL data for SCATTER plots -############################# +# call the model routine to select NEW MODEL data for SCATTER plots time.sleep(6.) -### make the plots in NCL -#### default domain plotting -generate_ncl_call(os.environ["POD_HOME"]+ "/SCATTER/NCL/get_scatter_data.ncl") +# make the plots in NCL +# default domain plotting +generate_ncl_call(os.environ["POD_HOME"] + "/SCATTER/NCL/get_scatter_data.ncl") time.sleep(6.) 
-generate_ncl_call(os.environ["POD_HOME"]+ "/SCATTER/NCL/scatter_01.ncl") -generate_ncl_call(os.environ["POD_HOME"]+ "/SCATTER/NCL/scatter_02.ncl") -generate_ncl_call(os.environ["POD_HOME"]+ "/SCATTER/NCL/scatter_03.ncl") -generate_ncl_call(os.environ["POD_HOME"]+ "/SCATTER/NCL/scatter_04.ncl") +generate_ncl_call(os.environ["POD_HOME"] + "/SCATTER/NCL/scatter_01.ncl") +generate_ncl_call(os.environ["POD_HOME"] + "/SCATTER/NCL/scatter_02.ncl") +generate_ncl_call(os.environ["POD_HOME"] + "/SCATTER/NCL/scatter_03.ncl") +generate_ncl_call(os.environ["POD_HOME"] + "/SCATTER/NCL/scatter_04.ncl") -### copy the html files for to create webpages -if os.path.isfile( os.environ["ENSO_MSE_WKDIR"]+"/SCATTER.html" ): - os.system("rm -f "+os.environ["ENSO_MSE_WKDIR"]+"/SCATTER/SCATTER.html") +# copy the html files for to create webpages +if os.path.isfile(os.environ["ENSO_MSE_WKDIR"] + "/SCATTER.html"): + os.system("rm -f " + os.environ["ENSO_MSE_WKDIR"] + "/SCATTER/SCATTER.html") -os.system("cp "+os.environ["POD_HOME"]+"/SCATTER/SCATTER.html "+os.environ["ENSO_MSE_WKDIR"] ) +os.system("cp " + os.environ["POD_HOME"] + "/SCATTER/SCATTER.html "+os.environ["ENSO_MSE_WKDIR"]) -### the end now = datetime.datetime.now() -print(" " ) -print( " ===================================================================") -print( " Scatter Module Finished " + now.strftime("%Y-%m-%d %H:%M") ) -print( " resulting plots are located in : " +os.environ["ENSO_MSE_WKDIR"],"/SCATTER/") -print( " ===================================================================") -### +print(" ") +print(" ===================================================================") +print(" Scatter Module Finished " + now.strftime("%Y-%m-%d %H:%M")) +print(" resulting plots are located in : " +os.environ["ENSO_MSE_WKDIR"], "/SCATTER/") +print(" ===================================================================") diff --git a/diagnostics/ENSO_MSE/SCATTER/check_input_files.py b/diagnostics/ENSO_MSE/SCATTER/check_input_files.py index b747f6496..137fdbfd6 100644 --- a/diagnostics/ENSO_MSE/SCATTER/check_input_files.py +++ b/diagnostics/ENSO_MSE/SCATTER/check_input_files.py @@ -1,11 +1,6 @@ -import os.path import sys import os - -## -## -### check the input data in inputdata/model directories required for SCATTER routine -## +# check the input data in inputdata/model directories required for SCATTER routine shared_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), @@ -13,45 +8,44 @@ ) sys.path.insert(0, shared_dir) -wkdir = os.environ["ENSO_MSE_WKDIR"] +wkdir = os.environ["ENSO_MSE_WKDIR"] vardir = os.environ["POD_HOME"] obsdata = os.environ["OBS_DATA"] -### checking the output direcories and create if missing -if not os.path.exists( wkdir + "/SCATTER/" ): - os.makedirs( wkdir + "/SCATTER/" ) +# checking the output direcories and create if missing +if not os.path.exists(wkdir + "/SCATTER/" ): + os.makedirs(wkdir + "/SCATTER/" ) -if not os.path.exists( wkdir + "/SCATTER/netCDF" ): - os.makedirs( wkdir + "/SCATTER/netCDF" ) +if not os.path.exists(wkdir + "/SCATTER/netCDF" ): + os.makedirs(wkdir + "/SCATTER/netCDF" ) -if not os.path.exists( wkdir + "/SCATTER/PS" ): - os.makedirs( wkdir + "/SCATTER/PS" ) +if not os.path.exists(wkdir + "/SCATTER/PS" ): + os.makedirs(wkdir + "/SCATTER/PS" ) -#### copy pre-calculated scatter data to working directory from inputdata/obs_data/SCATTER -dest = wkdir + "/SCATTER/netCDF/" -namein1 = obsdata + "/SCATTER/central_pacific_MSE_terms.txt" -namein2 = obsdata + 
"/SCATTER/eastern_pacific_MSE_terms.txt" -namein3 = obsdata + "/SCATTER/list-models-historical-obs" +# copy pre-calculated scatter data to working directory from inputdata/obs_data/SCATTER +dest = wkdir + "/SCATTER/netCDF/" +namein1 = obsdata + "/SCATTER/central_pacific_MSE_terms.txt" +namein2 = obsdata + "/SCATTER/eastern_pacific_MSE_terms.txt" +namein3 = obsdata + "/SCATTER/list-models-historical-obs" -os.system( 'cp ' + namein1 + ' ' + dest ) -os.system( 'cp ' + namein2 + ' ' + dest ) -os.system( 'cp ' + namein3 + ' ' + dest ) +os.system('cp ' + namein1 + ' ' + dest) +os.system('cp ' + namein2 + ' ' + dest) +os.system('cp ' + namein3 + ' ' + dest) -###### check for each input model data .. -namein = dest + "central_pacific_MSE_terms.txt" +# check for each input model data .. +namein = dest + "central_pacific_MSE_terms.txt" if not os.path.exists( namein): - print ("=============================================") - print ("=== MISSING FILE for SCATTER =====" ) - print ( namein ) - exit() + print("=============================================") + print("=== MISSING FILE for SCATTER =====") + print(namein) + sys.exit(1) namein = dest + "eastern_pacific_MSE_terms.txt" -if not os.path.exists( namein): - print ("=============================================") - print ("=== MISSING FILE for SCATTER =====" ) - print ( namein ) - exit() - -print( "=============================================") -print( " SCATTER input file check COMPLETED ") -print( "=============================================") -#### +if not os.path.exists(namein): + print("=============================================") + print("=== MISSING FILE for SCATTER =====" ) + print(namein) + sys.exit(1) + +print("=============================================") +print(" SCATTER input file check COMPLETED ") +print("=============================================") diff --git a/diagnostics/ENSO_MSE/html/index.html b/diagnostics/ENSO_MSE/html/index.html index 10a1ed39b..73b6cc4bc 100644 --- a/diagnostics/ENSO_MSE/html/index.html +++ b/diagnostics/ENSO_MSE/html/index.html @@ -9,8 +9,9 @@ Documentation and Contact Information -

[Body of the index.html hunk, garbled in extraction. Recoverable content: the "MDTF Variability Diagnostics" header and the links to the Composite plots, moist static energy budget plots, and moist static energy variance plots are re-wrapped in what appear to be <font color=navy> tags; the link text itself is unchanged.]
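The check_input_files.py hunks earlier in this patch converge on one failure pattern: print a banner naming the missing file, then call sys.exit(1) rather than the bare exit(), which is injected by the site module and meant for interactive use. A condensed sketch of that pattern is below; the filenames come from the SCATTER hunk, while the OBS_DATA fallback path is illustrative only.

import os
import sys

# Filenames as in the SCATTER check above; the default path is illustrative.
obsdata = os.environ.get("OBS_DATA", "/tmp/obs_data")
required = [os.path.join(obsdata, "SCATTER", name)
            for name in ("central_pacific_MSE_terms.txt",
                         "eastern_pacific_MSE_terms.txt")]

for path in required:
    if not os.path.exists(path):
        print("=============================================")
        print("=== MISSING FILE for SCATTER =====")
        print(path)
        sys.exit(1)  # nonzero status signals the failure to the calling framework
print(" SCATTER input file check COMPLETED ")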
diff --git a/diagnostics/ENSO_MSE/html/index_mdtf_03.html b/diagnostics/ENSO_MSE/html/index_mdtf_03.html index 060a285b4..3e7729db3 100644 --- a/diagnostics/ENSO_MSE/html/index_mdtf_03.html +++ b/diagnostics/ENSO_MSE/html/index_mdtf_03.html @@ -7,8 +7,9 @@
[Body of the index_mdtf_03.html hunk, garbled in extraction. Recoverable content: the "MDTF Variability Diagnostics" header and the links to the Composite plots, moist static energy budget plots, and moist static energy variance plots are re-wrapped in what appear to be <font color=navy> tags; the link text itself is unchanged.]
\ No newline at end of file diff --git a/diagnostics/ENSO_MSE/html/mdtf_composite.html b/diagnostics/ENSO_MSE/html/mdtf_composite.html index 638c236fd..faa422926 100644 --- a/diagnostics/ENSO_MSE/html/mdtf_composite.html +++ b/diagnostics/ENSO_MSE/html/mdtf_composite.html @@ -7,5 +7,6 @@
[Body of the mdtf_composite.html hunk, garbled in extraction. Recoverable content: the "MDTF Variability Diagnostics" header is re-wrapped in what appears to be a <font color=navy> tag; the text itself is unchanged.]
\ No newline at end of file diff --git a/diagnostics/ENSO_MSE/settings.jsonc b/diagnostics/ENSO_MSE/settings.jsonc index 127aadaa9..582bf76a4 100644 --- a/diagnostics/ENSO_MSE/settings.jsonc +++ b/diagnostics/ENSO_MSE/settings.jsonc @@ -2,6 +2,7 @@ "settings": { "driver": "ENSO_MSE.py", "long_name": "ENSO moist static energy budget", + "convention": "cesm", "description": "ENSO moist static energy budget", "runtime_requirements": { "ncl": ["contributed", "gsn_code", "gsn_csm", "shea_util", "calendar_decode2"] @@ -11,8 +12,16 @@ "frequency": "mon" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -37,101 +46,118 @@ "zg": { "standard_name": "geopotential_height", "units": "m", - "dimensions": ["time", "lev", "lat", "lon"], - "freq": "mon" + "realm": "atmos", + "dimensions": ["time", "lev", "lat", "lon"], + "freq": "mon" }, "ua": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "va": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "ta": { - "standard_name": "air_temperature", + "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "hus": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "wap": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "ts": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "hfss": { "standard_name": "surface_upward_sensible_heat_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "hfls": { "standard_name": "surface_upward_latent_heat_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rsus": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rsds": { "standard_name": "surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rsdt": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rsut": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rlus": { "standard_name": "surface_upwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rlds": { "standard_name": "surface_downwelling_longwave_flux_in_air", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": 
["time", "lat", "lon"], "freq": "mon" diff --git a/diagnostics/ENSO_MSE/shared/generate_ncl_call.py b/diagnostics/ENSO_MSE/shared/generate_ncl_call.py index 04f63c5cd..ed6333310 100644 --- a/diagnostics/ENSO_MSE/shared/generate_ncl_call.py +++ b/diagnostics/ENSO_MSE/shared/generate_ncl_call.py @@ -1,23 +1,24 @@ import subprocess -#============================================================ +# ============================================================ # generate_ncl_call - call a ncl_script via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_call(ncl_script): """generate_plots_call - call a ncl_script via subprocess call Arguments: ncl_script (string) - full path to ncl plotting file name """ - # check if the ncl_script exists - - # don't exit if it does not exists just print a warning. + # check if the ncl_script exists - + # don't exit if it does not exists just print a warning. try: - pipe = subprocess.Popen(['ncl -Q {0}'.format(ncl_script)], shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + pipe = subprocess.Popen(['ncl -Q {0}'.format(ncl_script)], shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = pipe.communicate()[0] output = '\t' + str(output).replace('\n','\n\t') print('NCL routine {0}:\n{1}'.format(ncl_script, output)) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 - diff --git a/diagnostics/ENSO_MSE/shared/get_parameters_in.py b/diagnostics/ENSO_MSE/shared/get_parameters_in.py index 11a6f4802..049013073 100644 --- a/diagnostics/ENSO_MSE/shared/get_parameters_in.py +++ b/diagnostics/ENSO_MSE/shared/get_parameters_in.py @@ -1,37 +1,39 @@ import os.path import sys -def get_parameters_in(lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composite, im1, im2, season, composite24, regression, correlation, undef, prefix): -## read in all parameter data + +def get_parameters_in(lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composite, im1, + im2, season, composite24, regression, correlation, undef, prefix): + # read in all parameter data undef = float( 1.1E+20) file_path = os.path.join(prefix,"../shared","parameters.txt") - if (os.path.exists(file_path)): + if os.path.exists(file_path): file = open(file_path, 'r') line = file.readline() line = line.strip() column = line.split() - lon1 = float(column[2]) + lon1 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lon2 = float(column[2]) + lon2 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lat1 = float(column[2]) + lat1 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lat2 = float(column[2]) + lat2 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - sigma = float(column[2]) + sigma = float(column[2]) line = file.readline() line = line.strip() @@ -42,7 +44,7 @@ def get_parameters_in(lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composi line = line.strip() column = line.split() imindx2 = int( column[2]) - if( imindx2 < imindx1): + if imindx2 < imindx1: imindx2 = imindx2 + 12 line = file.readline() @@ -50,47 +52,45 @@ def get_parameters_in(lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composi column = line.split() composite = int( column[2]) -# line = file.readline() - line = file.readline() line = line.strip() column = line.split() - im1 = int( column[2]) + im1 = int(column[2]) line = 
file.readline() line = line.strip() column = line.split() - im2 = int( column[2]) - if( im2 < im1): + im2 = int(column[2]) + if im2 < im1: im2 = im2 + 12 line = file.readline() line = line.strip() column = line.split() - season = column[2] + season = column[2] -#### composite evolution 24 month switches +# composite evolution 24 month switches line = file.readline() line = line.strip() column = line.split() composite24 = int(column[2]) -#### regression /correlation +# regression /correlation line = file.readline() line = line.strip() column = line.split() - regression = int( column[2]) + regression = int(column[2]) line = file.readline() line = line.strip() column = line.split() - correlation = int( column[2]) + correlation = int(column[2]) file.close() else: - print (" missing file: ", file_path) - print (" exiting get_parameters_in.py ") - sys.exit() - return lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composite, im1, im2, season, composite24, regression, correlation, undef - + print(" missing file: ", file_path) + print(" exiting get_parameters_in.py ") + sys.exit(1) + return (lon1, lon2, lat1, lat2, sigma, imindx1, imindx2, composite, + im1, im2, season, composite24, regression, correlation, undef) \ No newline at end of file diff --git a/diagnostics/ENSO_MSE/shared/set_variables_AM4.py b/diagnostics/ENSO_MSE/shared/set_variables_AM4.py index d2d95d5f8..46e8e35c7 100644 --- a/diagnostics/ENSO_MSE/shared/set_variables_AM4.py +++ b/diagnostics/ENSO_MSE/shared/set_variables_AM4.py @@ -8,15 +8,15 @@ os.environ["lon_var"] = "lon" os.environ["time_var"] = "time" os.environ["ps_var"] = "PS" -os.environ["pr_conversion_factor"] = "1" #units = m/s -os.environ["prc_conversion_factor"] = "1" #units = m/s -os.environ["prls_conversion_factor"] = "1" #units = m/s +os.environ["pr_conversion_factor"] = "1" #units = m/s +os.environ["prc_conversion_factor"] = "1" #units = m/s +os.environ["prls_conversion_factor"] = "1" #units = m/s # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["ta_var"] = "ta" # 3D temperature, units = K -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) -os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K -os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["ta_var"] = "ta" # 3D temperature, units = K +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_MSE/shared/set_variables_CESM.py b/diagnostics/ENSO_MSE/shared/set_variables_CESM.py index 15289bee0..aed3538ff 100644 --- a/diagnostics/ENSO_MSE/shared/set_variables_CESM.py +++ b/diagnostics/ENSO_MSE/shared/set_variables_CESM.py @@ -13,7 +13,7 @@ os.environ["ua_var"] = "U" os.environ["va_var"] = "V" os.environ["zg_var"] = "Z3" -os.environ["ta_var"] = "T" # 3D temperature, units = K +os.environ["ta_var"] = "T" # 3D temperature, units = K os.environ["qa_var"] = "Q" os.environ["omega_var"] = "OMEGA" @@ -40,15 +40,14 @@ 
os.environ["v850_var"] = "V850" os.environ["omega500_var"] = "OMEGA500" -### os.environ["pr_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prc_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prls_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prc_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prls_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["ta_var"] = "T" # 3D temperature, units = K -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) -os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K -os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["ta_var"] = "T" # 3D temperature, units = K +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_MSE/shared/set_variables_CMIP.py b/diagnostics/ENSO_MSE/shared/set_variables_CMIP.py index 7b25fc17f..3833d9d30 100644 --- a/diagnostics/ENSO_MSE/shared/set_variables_CMIP.py +++ b/diagnostics/ENSO_MSE/shared/set_variables_CMIP.py @@ -13,18 +13,18 @@ os.environ["ua_var"] = "ua" os.environ["va_var"] = "va" os.environ["zg_var"] = "zg" -os.environ["ta_var"] = "ta" # 3D temperature, units = K +os.environ["ta_var"] = "ta" # 3D temperature, units = K os.environ["qa_var"] = "hus" os.environ["omega_var"] = "wap" os.environ["ts_var"] = "ts" os.environ["pr_var"] = "pr" -## added heat fluxes 2018-12-11 +# added heat fluxes 2018-12-11 os.environ["hfss_var"] = "hfss" os.environ["hfls_var"] = "hfls" -### radiative fluxes +# radiative fluxes os.environ["rsus_var"] = "rsus" os.environ["rsds_var"] = "rsds" os.environ["rsdt_var"] = "rsdt" @@ -34,7 +34,7 @@ os.environ["rlds_var"] = "rlds" os.environ["rlut_var"] = "rlut" -### rest of the variables +# rest of the variables os.environ["prc_var"] = "prc" os.environ["prls_var"] = "prls" os.environ["rlut_var"] = "rlut" @@ -52,14 +52,14 @@ os.environ["v850_var"] = "V850" os.environ["omega500_var"] = "OMEGA500" -os.environ["pr_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prc_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prls_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["pr_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prc_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prls_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) 
-os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K -os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_MSE/shared/util.py b/diagnostics/ENSO_MSE/shared/util.py index b4dadc8b1..77450a113 100644 --- a/diagnostics/ENSO_MSE/shared/util.py +++ b/diagnostics/ENSO_MSE/shared/util.py @@ -1,45 +1,48 @@ +import sys +import os -def setenv (varname,varvalue,env_dict,verbose=0): - import os +def setenv (varname, varvalue, env_dict, verbose=0): # Not currently used. Needs to be a dictionary to be dumped once file is created # # Ideally this could be a wrapper to os.environ so any new env vars # automatically get written to the file - "replaces os.environ to set the variable AND save it to write out in namelist" + # replaces os.environ to set the variable AND save it to write out in namelist - if (verbose > 2 ): print ("Saving ",varname," = ",varvalue) + if verbose > 2: + print("Saving ",varname," = ",varvalue) os.environ[varname] = varvalue - env_dict[varname] = varvalue - if ( verbose > 2) : print ("Check ",varname," ",env_dict[varname]) + env_dict[varname] = varvalue + if verbose > 2: + print("Check ", varname, " ", env_dict[varname]) - -def check_required_dirs(verbose=3, already_exist =[], create_if_nec = []): +def check_required_dirs(verbose=3, already_exist=[], create_if_nec=[]): # arguments can be envvar name or just the paths - - import os - filestr = __file__+":check_required_dirs: " - errstr = "ERROR "+filestr - if verbose > 1: print (filestr +" starting") - for dir_in in already_exist + create_if_nec : - if verbose > 1: print ("\t looking for required dir: "+dir_in ) + filestr = __file__ + ":check_required_dirs: " + errstr = "ERROR " + filestr + if verbose > 1: + print(filestr + " starting") + for dir_in in already_exist + create_if_nec: + if verbose > 1: + print("\t looking for required dir: "+ dir_in ) if dir_in in os.environ: dir = os.environ[dir_in] - if verbose>2: print(" \t found "+dir_in+" = "+dir) + if verbose > 2: + print(" \t found " + dir_in + " = " + dir) else: - if verbose>2: print(" envvar "+dir_in+" not defined. Setting to self.") + if verbose>2: print(" envvar " + dir_in + " not defined. 
Setting to self.") dir = dir_in if not os.path.exists(dir): if not dir_in in create_if_nec: - if (verbose>0): - print(errstr+dir_in+" = "+dir+" directory does not exist") - print(" and not create_if_nec list: ",create_if_nec) - exit() + if verbose > 0: + print(errstr + dir_in + " = " + dir + " directory does not exist") + print(" and not create_if_nec list: ", create_if_nec) + sys.exit(1) else: - print(dir_in+" = "+dir+" created") + print(dir_in + " = " + dir + " created") os.makedirs(dir) else: - print("Found "+dir) + print("Found " + dir) diff --git a/diagnostics/ENSO_RWS/ENSO_RWS.py b/diagnostics/ENSO_RWS/ENSO_RWS.py index 902354817..00102d344 100755 --- a/diagnostics/ENSO_RWS/ENSO_RWS.py +++ b/diagnostics/ENSO_RWS/ENSO_RWS.py @@ -17,13 +17,13 @@ # ====================================================================== # ======================================================================` -### the switches to select LEVEL_04 or LEVEL_05 either 0 or 1 but not both the same -level4 = 1 -level5 = 0 +# The switches to select LEVEL_04 or LEVEL_05 either 0 or 1 but not both the same +level4 = 1 +level5 = 0 print( "Starting ENSO_RWS.py ") -os.environ["ENSO_RWS_WKDIR"] = os.environ["WK_DIR"] +os.environ["ENSO_RWS_WKDIR"] = os.environ["WORK_DIR"] #DRB: unfortunately these don't get copied to namelist_save, which means #debugging requires starting from this script. To add them here requires diff --git a/diagnostics/ENSO_RWS/ENSO_RWS_05.html b/diagnostics/ENSO_RWS/ENSO_RWS_05.html index bbc634dd9..b62f08abb 100644 --- a/diagnostics/ENSO_RWS/ENSO_RWS_05.html +++ b/diagnostics/ENSO_RWS/ENSO_RWS_05.html @@ -30,9 +30,9 @@ results are illustrated here.

-

ENSO Rossby wave diagnostics

+<font color=navy>

ENSO Rossby wave diagnostics


-

LEVEL_01 plots

-

LEVEL_02 plots

-

LEVEL_03 plots

-

LEVEL_05 plots

+

<font color=navy>LEVEL_01 plots

+

<font color=navy>LEVEL_02 plots

+

<font color=navy>LEVEL_03 plots

+

<font color=navy>LEVEL_05 plots

\ No newline at end of file diff --git a/diagnostics/ENSO_RWS/LEVEL_01/NCL/data_routine.ncl b/diagnostics/ENSO_RWS/LEVEL_01/NCL/data_routine.ncl index f77044b00..6c20c99fa 100644 --- a/diagnostics/ENSO_RWS/LEVEL_01/NCL/data_routine.ncl +++ b/diagnostics/ENSO_RWS/LEVEL_01/NCL/data_routine.ncl @@ -11,8 +11,8 @@ begin outdir = getenv("ENSO_RWS_WKDIR") + "/model" ; envvar set in ENSO_MSE.py case = getenv( "CASENAME") - iy1 = toint( ( getenv("FIRSTYR")) ) - iy2 = toint( ( getenv("LASTYR")) ) + iy1 = toint( ( getenv("startdate")) ) + iy2 = toint( ( getenv("enddate")) ) ;;; selected variables import from system defined vars. ;; 3 D vars diff --git a/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_RWS_aUVdiv_cvort_overlay.ncl b/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_RWS_aUVdiv_cvort_overlay.ncl index 66ab4ce12..0d2a3dcaa 100644 --- a/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_RWS_aUVdiv_cvort_overlay.ncl +++ b/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_RWS_aUVdiv_cvort_overlay.ncl @@ -65,7 +65,7 @@ begin ;; levels1 = RWS terms levels1 = (/ -30, -27, -24, -21, -18, -15, -12, -9, -6, 6, 9, 12, 15, 18, 21, 24, 27, 30 /) -;;; absolute corticity +;;; absolute vorticity cmin4 = -16 ;; cmax4 = 16 ;; diff --git a/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_divergent_wind_composite.ncl b/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_divergent_wind_composite.ncl index 100577085..e58153f59 100644 --- a/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_divergent_wind_composite.ncl +++ b/diagnostics/ENSO_RWS/LEVEL_01/NCL/plot_divergent_wind_composite.ncl @@ -3,7 +3,7 @@ begin - varcode = getenv("POD_HOME") + varcode = getenv("POD_HOME") dirname1 = getenv("ENSO_RWS_WKDIR") obsname = getenv("OBS_DATA") + "/DATA/" diff --git a/diagnostics/ENSO_RWS/LEVEL_01/NCL_DATA/get_composites.ncl b/diagnostics/ENSO_RWS/LEVEL_01/NCL_DATA/get_composites.ncl index 7ac91eb7e..c430ad258 100644 --- a/diagnostics/ENSO_RWS/LEVEL_01/NCL_DATA/get_composites.ncl +++ b/diagnostics/ENSO_RWS/LEVEL_01/NCL_DATA/get_composites.ncl @@ -11,8 +11,8 @@ begin case = getenv( "CASENAME") codedata = getenv ("POD_HOME") - iy1 = toint( ( getenv("FIRSTYR")) ) - iy2 = toint( ( getenv("LASTYR")) ) + iy1 = toint( ( getenv("startdate")) ) + iy2 = toint( ( getenv("enddate")) ) Vars3 = (/ "zg", "ua", "va", "ta", "wap" /) diff --git a/diagnostics/ENSO_RWS/LEVEL_02/LEVEL_02.py b/diagnostics/ENSO_RWS/LEVEL_02/LEVEL_02.py index b7f4dbebc..771fba855 100644 --- a/diagnostics/ENSO_RWS/LEVEL_02/LEVEL_02.py +++ b/diagnostics/ENSO_RWS/LEVEL_02/LEVEL_02.py @@ -10,11 +10,10 @@ # programming : Jan Hafner, jhafner@hawaii.edu # # -## This package is distributed under the LGPLv3 license (see LICENSE.txt) +# This package is distributed under the LGPLv3 license (see LICENSE.txt) import sys import os - import datetime shared_dir = os.path.join( @@ -60,14 +59,14 @@ generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_02/NCL/plot_U_wind_clima.ncl") generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_02/NCL/plot_wave_number_clima.ncl") -## copy the html file -file_src = os.environ["POD_HOME"]+"/LEVEL_02/LEVEL_02.html" -file_dest = os.environ["ENSO_RWS_WKDIR"]+"/LEVEL_02.html" -if os.path.isfile( file_dest ): - os.system("rm -f "+file_dest) -os.system("cp "+file_src+" "+file_dest) +# copy the html file +file_src = os.environ["POD_HOME"] + "/LEVEL_02/LEVEL_02.html" +file_dest = os.environ["ENSO_RWS_WKDIR"] + "/LEVEL_02.html" +if os.path.isfile(file_dest): + os.system("rm -f " + file_dest) +os.system("cp " + file_src + " " + file_dest) now = datetime.datetime.now() -print (" LEVEL_02 completed " + now.strftime("%Y-%m-%d
%H:%M") ) +print(" LEVEL_02 completed " + now.strftime("%Y-%m-%d %H:%M")) diff --git a/diagnostics/ENSO_RWS/LEVEL_03/LEVEL_03.py b/diagnostics/ENSO_RWS/LEVEL_03/LEVEL_03.py index 9bb35df89..3618ae8c2 100644 --- a/diagnostics/ENSO_RWS/LEVEL_03/LEVEL_03.py +++ b/diagnostics/ENSO_RWS/LEVEL_03/LEVEL_03.py @@ -55,14 +55,12 @@ generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_03/NCL/plot_RWS_composite.ncl") -## copy the html file -file_src = os.environ["POD_HOME"]+"/LEVEL_03/LEVEL_03.html" -file_dest = os.environ["ENSO_RWS_WKDIR"]+"/LEVEL_03.html" -if os.path.isfile( file_dest ): - os.system("rm -f "+file_dest) -os.system("cp "+file_src+" "+file_dest) - +# copy the html file +file_src = os.environ["POD_HOME"] + "/LEVEL_03/LEVEL_03.html" +file_dest = os.environ["ENSO_RWS_WKDIR"] + "/LEVEL_03.html" +if os.path.isfile(file_dest): + os.system("rm -f " + file_dest) +os.system("cp " + file_src + " " + file_dest) now = datetime.datetime.now() -print (" LEVEL_03 completed " + now.strftime("%Y-%m-%d %H:%M") ) - +print(" LEVEL_03 completed " + now.strftime("%Y-%m-%d %H:%M")) diff --git a/diagnostics/ENSO_RWS/LEVEL_04/LEVEL_04.py b/diagnostics/ENSO_RWS/LEVEL_04/LEVEL_04.py index 437880653..cf14c9544 100644 --- a/diagnostics/ENSO_RWS/LEVEL_04/LEVEL_04.py +++ b/diagnostics/ENSO_RWS/LEVEL_04/LEVEL_04.py @@ -11,11 +11,10 @@ # programming : Jan Hafner, jhafner@hawaii.edu # # -## This package is distributed under the LGPLv3 license (see LICENSE.txt) +# This package is distributed under the LGPLv3 license (see LICENSE.txt) import sys import os - import datetime shared_dir = os.path.join( @@ -48,7 +47,7 @@ print(" Start of LEVEL_04 calculations " + now.strftime("%Y-%m-%d %H:%M")) print("===============================================================") -## input data directory +# input data directory generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_04/NCL/get_PNA_index.ncl") generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_04/NCL/scatter_plot_01.ncl") generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_04/NCL/scatter_plot_02.ncl") @@ -61,14 +60,12 @@ generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_04/NCL/scatter_plot_09.ncl") generate_ncl_call(os.environ["POD_HOME"] + "/LEVEL_04/NCL/scatter_plot_10.ncl") -## copy the html file -file_src = os.environ["POD_HOME"]+"/LEVEL_04/LEVEL_04.html" -file_dest = os.environ["ENSO_RWS_WKDIR"]+"/LEVEL_04.html" -if os.path.isfile( file_dest ): - os.system("rm -f "+file_dest) -os.system("cp "+file_src+" "+file_dest) - +# copy the html file +file_src = os.environ["POD_HOME"] + "/LEVEL_04/LEVEL_04.html" +file_dest = os.environ["ENSO_RWS_WKDIR"] + "/LEVEL_04.html" +if os.path.isfile(file_dest): + os.system("rm -f " + file_dest) +os.system("cp " + file_src + " " + file_dest) now = datetime.datetime.now() -print (" LEVEL_04 completed " + now.strftime("%Y-%m-%d %H:%M") ) - +print(" LEVEL_04 completed " + now.strftime("%Y-%m-%d %H:%M")) diff --git a/diagnostics/ENSO_RWS/LEVEL_05/LEVEL_05.html b/diagnostics/ENSO_RWS/LEVEL_05/LEVEL_05.html index 3e5136481..df4c7129b 100644 --- a/diagnostics/ENSO_RWS/LEVEL_05/LEVEL_05.html +++ b/diagnostics/ENSO_RWS/LEVEL_05/LEVEL_05.html @@ -22,8 +22,8 @@

ENSO ROSSBY WAVE LEVEL 05 Diagnostics


 

-

RESULTS:  LEVEL 05  - +

< color="#000080">RESULTS:  LEVEL 05< color="#000080">< color="#000080">  +

@@ -35,32 +35,32 @@

ENSO ROSSBY WAVE LEVEL 05 Diagnostics
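The NCL and Python edits in this PR consistently rename the framework environment variables: FIRSTYR and LASTYR become startdate and enddate, and WK_DIR becomes WORK_DIR. A minimal Python sketch of the new contract, assuming (as the toint(getenv(...)) calls above do) that startdate and enddate parse as integer years:

    import os

    start_year = int(os.environ["startdate"])  # formerly FIRSTYR
    end_year = int(os.environ["enddate"])      # formerly LASTYR
    work_dir = os.environ["WORK_DIR"]          # formerly WK_DIR

    # PODs in this PR conventionally write model output under WORK_DIR/model:
    model_dir = os.path.join(work_dir, "model", "netCDF")
    print("processing years {0}-{1} under {2}".format(start_year, end_year, model_dir))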

diff --git a/diagnostics/ENSO_RWS/settings.jsonc b/diagnostics/ENSO_RWS/settings.jsonc index 4e594ed2d..6ce291e92 100644 --- a/diagnostics/ENSO_RWS/settings.jsonc +++ b/diagnostics/ENSO_RWS/settings.jsonc @@ -2,6 +2,7 @@ "settings": { "driver": "ENSO_RWS.py", "long_name": "ENSO Rossby Wave Source", + "convention": "cesm", "description": "ENSO Rossby Wave Source", "runtime_requirements": { "ncl": ["contributed", "gsn_code", "gsn_csm", "shea_util", "calendar_decode2"] @@ -11,8 +12,16 @@ "frequency": "mon" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -25,41 +34,48 @@ "zg": { "standard_name": "geopotential_height", "units": "m", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "ua": { "standard_name": "eastward_wind", "units": "m s-1", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "va": { "standard_name": "northward_wind", "units": "m s-1", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "ta": { - "standard_name": "air_temperature", + "standard_name": "air_temperature", "units": "K", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], "freq": "mon" }, "wap": { "standard_name": "lagrangian_tendency_of_air_pressure", "units": "Pa s-1", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], - "freq": "mon" + "freq": "mon" }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"], "freq": "mon" }, "ts": { "standard_name": "surface_temperature", + "realm": "atmos", "units": "K", "dimensions": ["time", "lat", "lon"], "freq": "mon" diff --git a/diagnostics/ENSO_RWS/shared/generate_ncl_call.py b/diagnostics/ENSO_RWS/shared/generate_ncl_call.py index 04f63c5cd..ed6333310 100644 --- a/diagnostics/ENSO_RWS/shared/generate_ncl_call.py +++ b/diagnostics/ENSO_RWS/shared/generate_ncl_call.py @@ -1,23 +1,24 @@ import subprocess -#============================================================ +# ============================================================ # generate_ncl_call - call a ncl_script via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_call(ncl_script): """generate_plots_call - call a ncl_script via subprocess call Arguments: ncl_script (string) - full path to ncl plotting file name """ - # check if the ncl_script exists - - # don't exit if it does not exists just print a warning. + # check if the ncl_script exists - + # don't exit if it does not exist, just print a warning.
try: - pipe = subprocess.Popen(['ncl -Q {0}'.format(ncl_script)], shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + pipe = subprocess.Popen(['ncl -Q {0}'.format(ncl_script)], shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = pipe.communicate()[0] output = '\t' + str(output).replace('\n','\n\t') print('NCL routine {0}:\n{1}'.format(ncl_script, output)) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 - diff --git a/diagnostics/ENSO_RWS/shared/get_parameters_in.py b/diagnostics/ENSO_RWS/shared/get_parameters_in.py index d0b244bb9..a5a4219b2 100644 --- a/diagnostics/ENSO_RWS/shared/get_parameters_in.py +++ b/diagnostics/ENSO_RWS/shared/get_parameters_in.py @@ -1,37 +1,37 @@ import os.path import sys -def get_parameters_in(lon1, lon2, lat1, lat2, sigma, im1, im2, season, prefix): -## read in all parameter data - file_path = os.path.join(prefix,"../shared/","parameters.txt") +def get_parameters_in(lon1, lon2, lat1, lat2, sigma, im1, im2, season, prefix): + # read in all parameter data + file_path = os.path.join(prefix, "../shared/", "parameters.txt") - if (os.path.exists( file_path)): + if os.path.exists( file_path): file = open( file_path, 'r') line = file.readline() line = line.strip() column = line.split() - lon1 = float(column[2]) + lon1 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lon2 = float(column[2]) + lon2 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lat1 = float(column[2]) + lat1 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - lat2 = float(column[2]) + lat2 = float(column[2]) line = file.readline() line = line.strip() column = line.split() - sigma = float(column[2]) + sigma = float(column[2]) line = file.readline() line = line.strip() @@ -43,7 +43,7 @@ def get_parameters_in(lon1, lon2, lat1, lat2, sigma, im1, im2, season, prefix): column = line.split() im2 = int( column[2]) - if( im2 < im1): + if im2 < im1: im2 = im2 + 12 line = file.readline() @@ -54,8 +54,8 @@ def get_parameters_in(lon1, lon2, lat1, lat2, sigma, im1, im2, season, prefix): file.close() else: - print (" missing file: ", file_path) - print (" exiting get_parameters_in.py ") + print(" missing file: ", file_path) + print(" exiting get_parameters_in.py ") sys.exit() return lon1, lon2, lat1, lat2, sigma, im1, im2, season diff --git a/diagnostics/ENSO_RWS/shared/set_variables_AM4.py b/diagnostics/ENSO_RWS/shared/set_variables_AM4.py index d2d95d5f8..46e8e35c7 100644 --- a/diagnostics/ENSO_RWS/shared/set_variables_AM4.py +++ b/diagnostics/ENSO_RWS/shared/set_variables_AM4.py @@ -8,15 +8,15 @@ os.environ["lon_var"] = "lon" os.environ["time_var"] = "time" os.environ["ps_var"] = "PS" -os.environ["pr_conversion_factor"] = "1" #units = m/s -os.environ["prc_conversion_factor"] = "1" #units = m/s -os.environ["prls_conversion_factor"] = "1" #units = m/s +os.environ["pr_conversion_factor"] = "1" #units = m/s +os.environ["prc_conversion_factor"] = "1" #units = m/s +os.environ["prls_conversion_factor"] = "1" #units = m/s # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["ta_var"] = "ta" # 3D temperature, units = K -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) -os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K 
-os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["ta_var"] = "ta" # 3D temperature, units = K +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_RWS/shared/set_variables_CESM.py b/diagnostics/ENSO_RWS/shared/set_variables_CESM.py index 15289bee0..27448a4ff 100644 --- a/diagnostics/ENSO_RWS/shared/set_variables_CESM.py +++ b/diagnostics/ENSO_RWS/shared/set_variables_CESM.py @@ -13,7 +13,7 @@ os.environ["ua_var"] = "U" os.environ["va_var"] = "V" os.environ["zg_var"] = "Z3" -os.environ["ta_var"] = "T" # 3D temperature, units = K +os.environ["ta_var"] = "T" # 3D temperature, units = K os.environ["qa_var"] = "Q" os.environ["omega_var"] = "OMEGA" @@ -39,16 +39,14 @@ os.environ["u850_var"] = "U850" os.environ["v850_var"] = "V850" os.environ["omega500_var"] = "OMEGA500" - -### os.environ["pr_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prc_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prls_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prc_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prls_conversion_factor"] = "1000" # units in CAM (m/s), convert to kg/m2/s (mm/s) # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["ta_var"] = "T" # 3D temperature, units = K -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) -os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K -os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["ta_var"] = "T" # 3D temperature, units = K +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_RWS/shared/set_variables_CMIP.py b/diagnostics/ENSO_RWS/shared/set_variables_CMIP.py index 7b25fc17f..3833d9d30 100644 --- a/diagnostics/ENSO_RWS/shared/set_variables_CMIP.py +++ b/diagnostics/ENSO_RWS/shared/set_variables_CMIP.py @@ -13,18 +13,18 @@ os.environ["ua_var"] = "ua" os.environ["va_var"] = "va" os.environ["zg_var"] = "zg" -os.environ["ta_var"] = "ta" # 3D temperature, units = K +os.environ["ta_var"] = "ta" # 3D temperature, units = K os.environ["qa_var"] = "hus" os.environ["omega_var"] = "wap" os.environ["ts_var"] = "ts" os.environ["pr_var"] = "pr" -## added heat fluxes 2018-12-11 +# added heat fluxes 2018-12-11 os.environ["hfss_var"] = "hfss" os.environ["hfls_var"] = "hfls" -### radiative fluxes +# 
radiative fluxes os.environ["rsus_var"] = "rsus" os.environ["rsds_var"] = "rsds" os.environ["rsdt_var"] = "rsdt" @@ -34,7 +34,7 @@ os.environ["rlds_var"] = "rlds" os.environ["rlut_var"] = "rlut" -### rest of the variables +# rest of the variables os.environ["prc_var"] = "prc" os.environ["prls_var"] = "prls" os.environ["rlut_var"] = "rlut" @@ -52,14 +52,14 @@ os.environ["v850_var"] = "V850" os.environ["omega500_var"] = "OMEGA500" -os.environ["pr_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prc_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) -os.environ["prls_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["pr_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prc_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) +os.environ["prls_conversion_factor"] = "1" # units in CAM (m/s), convert to kg/m2/s (mm/s) # ------------------------------------------------------------------------ # Variables for Convective Transition Diagnostics module: -os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) -os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K -os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) +os.environ["prw_var"] = "prw" # Column Water Vapor (precipitable water vapor), units = mm (or kg/m^2) +os.environ["tave_var"] = "tave" # Mass-Weighted Column Average Tropospheric Temperature, units = K +os.environ["qsat_int_var"] = "qsat_int" # Vertically-Integrated Saturation Specific Humidity, units = mm (or kg/m^2) # End - Variables for Convective Transition Diagnostics package # ------------------------------------------------------------------------ diff --git a/diagnostics/ENSO_RWS/shared/util.py b/diagnostics/ENSO_RWS/shared/util.py index a8369cef7..120dbfbb1 100644 --- a/diagnostics/ENSO_RWS/shared/util.py +++ b/diagnostics/ENSO_RWS/shared/util.py @@ -1,46 +1,49 @@ +import os -def setenv (varname,varvalue,env_dict,verbose=0): - import os + +def setenv(varname: str, varvalue, env_dict: dict, verbose=0): # Not currently used. 
Needs to be a dictionary to be dumped once file is created # # Ideally this could be a wrapper to os.environ so any new env vars # automatically get written to the file "replaces os.environ to set the variable AND save it to write out in namelist" - if (verbose > 2 ): print ("Saving ",varname," = ",varvalue) + if verbose > 2: + print("Saving ", varname, " = ", varvalue) os.environ[varname] = varvalue - env_dict[varname] = varvalue - if ( verbose > 2) : print ("Check ",varname," ",env_dict[varname]) - + env_dict[varname] = varvalue + if verbose > 2: + print("Check ", varname, " ", env_dict[varname]) -def check_required_dirs(verbose=3, already_exist =[], create_if_nec = []): +def check_required_dirs(verbose=3, already_exist=[], create_if_nec=[]): # arguments can be envvar name or just the paths - -# print("var_code/ENSO_MSE/COMPOSITE/util.py check_required_dirs") - import os - filestr = __file__+":check_required_dirs: " - errstr = "ERROR "+filestr - if verbose > 1: print (filestr +" starting") - for dir_in in already_exist + create_if_nec : - if verbose > 1: print ("\t looking for required dir: "+dir_in ) + filestr = __file__ + ":check_required_dirs: " + errstr = "ERROR " + filestr + if verbose > 1: + print(filestr + " starting") + for dir_in in already_exist + create_if_nec: + if verbose > 1: + print("\t looking for required dir: " + dir_in) if dir_in in os.environ: dir = os.environ[dir_in] - if verbose>2: print(" \t found "+dir_in+" = "+dir) + if verbose > 2: + print(" \t found " + dir_in + " = " + dir) else: - if verbose>2: print(" envvar "+dir_in+" not defined. Setting to self.") + if verbose > 2: + print("envvar " + dir_in + " not defined. Setting to self.") dir = dir_in if not os.path.exists(dir): if not dir_in in create_if_nec: - if (verbose>0): - print(errstr+dir_in+" = "+dir+" directory does not exist") - print(" and not create_if_nec list: ",create_if_nec) + if verbose > 0: + print(errstr+dir_in + " = " + dir + " directory does not exist") + print("and not create_if_nec list: ", create_if_nec) exit() else: - print(dir_in+" = "+dir+" created") + print(dir_in + " = " + dir + " created") os.makedirs(dir) else: - print("Found "+dir) + print("Found " + dir) diff --git a/diagnostics/EOF_500hPa/settings.jsonc b/diagnostics/EOF_500hPa/settings.jsonc index b5c668e18..9eebd882a 100644 --- a/diagnostics/EOF_500hPa/settings.jsonc +++ b/diagnostics/EOF_500hPa/settings.jsonc @@ -12,7 +12,7 @@ "settings" : { "driver" : "EOF_500hPa.py", "long_name" : "EOF of geopotential height anomalies of 500 hPa", - "realm" : "atmos", + "convention": "cesm", "description" : "EOF of geopotential height anomalies for 500 hPa", "runtime_requirements": { "python3": [], @@ -23,16 +23,26 @@ "frequency": "mon" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "plev": { "standard_name": "air_pressure", + "realm": "atmos", "units": "hPa", "positive": "down", "axis": "Z" }, "lev": { "standard_name": "atmosphere_hybrid_sigma_pressure_coordinate", + "realm": "atmos", "units": "level", "positive": "down", "axis": "Z" @@ -45,6 +55,7 @@ "zg": { "standard_name": "geopotential_height", "units": "m", + "realm": "atmos", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"plev": 500}, "alternates": ["zg_hybrid_sigma", "ps"] @@ -52,12 +63,14 @@ "zg_hybrid_sigma": { "standard_name":
"geopotential_height", "units": "m", + "realm": "atmos", "dimensions": ["time", "lev", "lat", "lon"], "requirement": "alternate" }, "ps": { "standard_name": "surface_air_pressure", "units": "Pa", + "realm": "atmos", "dimensions": ["time", "lat", "lon"], "requirement": "alternate" } diff --git a/diagnostics/MJO_prop_amp/MJO_prop_amp.html b/diagnostics/MJO_prop_amp/MJO_prop_amp.html index 5f6e29dd8..2c7f45eec 100644 --- a/diagnostics/MJO_prop_amp/MJO_prop_amp.html +++ b/diagnostics/MJO_prop_amp/MJO_prop_amp.html @@ -6,21 +6,17 @@

MJO Propagation and Amplitude Diagnostic (Jiang, UCLA)

This MJO propagation and amplitude diagnostic metrics is mainly motivated -by recent multi‐ -model studies that model skill in representing eastward propagation of the -MJO is closely related +by recent multi‐model studies that model skill in representing eastward propagation of the MJO is closely related to model winter mean low‐level moisture pattern over the Indo‐Pacific -region, and the model -MJO amplitude tends to be tightly associated with the moisture convective -adjustment time -scale. This package is designed to provide further independent verifcation - of these above processes based on new GCM siumulations. +region, and the model MJO amplitude tends to be tightly associated with the moisture convective adjustment time +scale. This package is designed to provide further independent verification of the above processes +based on new GCM simulations.

Full Documentation and Contact Information

-

El Nino

+

< color="#000080">El Nino

-

Positive precipitation anomaly vs divergence anomaly

+

< color="#000080">Positive precipitation anomaly vs divergence anomaly

-

plot

+

< color="#000080">plot

-

Positive precipitation anomaly vs RWS 1

+

< color="#000080">Positive precipitation anomaly vs RWS 1

-

plot

+

< color="#000080">plot

-

Positive precipitation anomaly vs RWS 2 +

< color="#000080">Positive precipitation anomaly vs RWS 2>

-

plot

+

plot

- -
Diagnostics of the Madden-Julian Oscillation +< color=navy> Diagnostics of the Madden-Julian Oscillation


@@ -55,5 +51,4 @@

MJO Propagation and Amplitude Diagnostic (Jiang, UCLA)

MJO amplitude vs Convective moisture adjustment time-scale plot
diff --git a/diagnostics/MJO_prop_amp/MJO_prop_amp.py b/diagnostics/MJO_prop_amp/MJO_prop_amp.py index c985ccd6d..ac288b58c 100644 --- a/diagnostics/MJO_prop_amp/MJO_prop_amp.py +++ b/diagnostics/MJO_prop_amp/MJO_prop_amp.py @@ -1,6 +1,5 @@ # This file is part of the MJO_prop_amp module of the MDTF code package (see LICENSE.txt) - -#=============================================================== +# =============================================================== # Diagnostic package for MJO propagation and amplitude in GCMs # Version 2.1 September 25, 2018. Alex Gonzalez (UCLA, now at IA State) and Xianan Jiang (UCLA) # Contributors: M. Zhao (GFDL), E. Maloney (CSU) @@ -9,9 +8,9 @@ # Currently consists of following functionalities: # (1) Interpolate model output to regular horizontal grids (2.5 x 2.5 deg) ; # (2) Evaluate model skill for MJO propagation based on pattern correlation of rainfall Hovmoller diagrams -# follwoing Jiang et al. (2015), and link model MJO propagation skill to model skill in the low-level +# following Jiang et al. (2015), and link model MJO propagation skill to model skill in the low-level # mean moisture pattern following Jiang (2017) and Gonzalez & Jiang (2017); -# (3) Evaluate model MJO amplitude and link it to model conovective moisture adjustment time scale +# (3) Evaluate model MJO amplitude and link it to model convective moisture adjustment time scale # following Jiang et al. (2016); # All scripts of this package can be found under: /diagnostics/MJO_prop_amp @@ -26,42 +25,46 @@ # (3) 4-D specific humidity (units: g/g) # Reference: -# Jiang et al (2015): Vertical structure and physical processes of the Madden-Julian oscillation: +# Jiang et al. (2015): Vertical structure and physical processes of the Madden-Julian oscillation: # Exploring key model physics in climate simulations. JGR-Atmos, 10.1002/2014JD022375, 4718-4748. -# Jiang et al (2016): Convective moisture adjustment time scale as a key factor in regulating +# Jiang et al. (2016): Convective moisture adjustment time scale as a key factor in regulating # model amplitude of the Madden-Julian Oscillation. GRL,43,10,412-10,419. # Jiang (2017): Key processes for the eastward propagation of the Madden-Julian Oscillation # based on multimodel simulations. JGR-Atmos, 10.1002/2016JD025955. # Gonzalez & Jiang (2017): Winter Mean Lower-Tropospheric Moisture over the Maritime Continent -# as a Climate Model Diagnostic Metric for the Propagation of the Madden-Julian Oscillation. GRL . +# as a Climate Model Diagnostic Metric for the Propagation of the Madden-Julian Oscillation. GRL. import os import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_plots(nclPlotFile): # check if the nclPlotFile exists - # don't exit if it does not exists just print a warning. 
try: pipe = subprocess.Popen(['ncl {0}'.format(nclPlotFile)], shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].decode() - print('NCL routine {0} \n {1}'.format(nclPlotFile,output)) + print('NCL routine {0} \n {1}'.format(nclPlotFile, output)) while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 -#============================================================ +# ============================================================ # Call NCL code here -#============================================================ +# ============================================================ # create synonyms for env var names to avoid changes to rest of this POD's code -os.environ["file_pr"] = os.environ["PR_FILE"] + + +os.environ["file_pr"] = os.environ["PR_FILE"] os.environ["file_prw"] = os.environ["PRW_FILE"] os.environ["file_hus"] = os.environ["HUS_FILE"] @@ -69,9 +72,6 @@ def generate_ncl_plots(nclPlotFile): print("=======") print("Diagnostics for MJO propagation and amplitude") print("Interpolating model data to standard grids ...") -generate_ncl_plots(os.environ["POD_HOME"]+"/m_intp.ncl") -print("Starting disgostic program ...") -generate_ncl_plots(os.environ["POD_HOME"]+"/m_diag.ncl") - - - +generate_ncl_plots(os.environ["POD_HOME"] + "/m_intp.ncl") +print("Starting diagnostic program ...") +generate_ncl_plots(os.environ["POD_HOME"] + "/m_diag.ncl") diff --git a/diagnostics/MJO_prop_amp/m_diag.ncl b/diagnostics/MJO_prop_amp/m_diag.ncl index c561997a6..ee4d6195e 100644 --- a/diagnostics/MJO_prop_amp/m_diag.ncl +++ b/diagnostics/MJO_prop_amp/m_diag.ncl @@ -9,7 +9,7 @@ setfileoption("nc", "Format", getenv("MDTF_NC_FORMAT")) begin -wk_dir = getenv("WK_DIR") +wk_dir = getenv("WORK_DIR") OBS_DATA = getenv("OBS_DATA") ; model skill score names (MJO propagtion, specific humidity, ; MJO amplitude, convect. 
timescale) diff --git a/diagnostics/MJO_prop_amp/m_intp.ncl b/diagnostics/MJO_prop_amp/m_intp.ncl index 37baf41fa..5b0dbba87 100644 --- a/diagnostics/MJO_prop_amp/m_intp.ncl +++ b/diagnostics/MJO_prop_amp/m_intp.ncl @@ -8,9 +8,9 @@ setfileoption("nc", "Format", getenv("MDTF_NC_FORMAT")) begin -print_clock("Starting model data inteperation") +print_clock("Starting model data interpolation") -wk_dir = getenv("WK_DIR") +wk_dir = getenv("WORK_DIR") pr_name = getenv("pr_var") prw_name = getenv("prw_var") hus_name = getenv("hus_var") diff --git a/diagnostics/MJO_prop_amp/settings.jsonc b/diagnostics/MJO_prop_amp/settings.jsonc index a1bd053c4..94293a8ac 100644 --- a/diagnostics/MJO_prop_amp/settings.jsonc +++ b/diagnostics/MJO_prop_amp/settings.jsonc @@ -13,7 +13,7 @@ "settings": { "driver": "MJO_prop_amp.py", "long_name": "MJO propagation and amplitude in GCMs", - "realm" : "atmos", + "convention" : "cesm", "description": "Diagnostics of the Madden-Julian Oscillation (Jiang UCLA)", "runtime_requirements": { "python3": [], @@ -24,8 +24,16 @@ "frequency": "day" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "Pa", @@ -38,15 +46,18 @@ "pr": { "standard_name": "precipitation_flux", "units": "kg m-2 s-1", + "realm": "atmos", "dimensions": ["time", "lat", "lon"] }, "prw": { "standard_name": "atmosphere_mass_content_of_water_vapor", "units": "kg m-2", + "realm": "atmos", "dimensions": ["time", "lat", "lon"] }, "hus": { "standard_name": "specific_humidity", + "realm": "atmos", "units": "1", "dimensions": ["time", "lev", "lat", "lon"] } diff --git a/diagnostics/MJO_suite/MJO_suite.html b/diagnostics/MJO_suite/MJO_suite.html index 3696c9b61..b7d4a53c7 100644 --- a/diagnostics/MJO_suite/MJO_suite.html +++ b/diagnostics/MJO_suite/MJO_suite.html @@ -6,7 +6,8 @@

Madden-Julian Oscillation Analysis from NCAR

This module computes many of the diagnostics described by the WGNE - MJO Task Force and developed by Dennis Shea for observational data. Using daily + MJO Task Force and developed + by Dennis Shea for observational data. Using daily precipitation, outgoing longwave radiation, zonal wind at 850 and 200 hPa and meridional wind at 200 hPa, the module computes anomalies, bandpass-filters for the 20-100 day period, calculates the MJO Index as diff --git a/diagnostics/MJO_suite/MJO_suite.py b/diagnostics/MJO_suite/MJO_suite.py index b02fcb0be..ada67005f 100644 --- a/diagnostics/MJO_suite/MJO_suite.py +++ b/diagnostics/MJO_suite/MJO_suite.py @@ -9,9 +9,11 @@ import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -27,13 +29,14 @@ def generate_ncl_plots(nclPlotFile): while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) - + print('WARNING', e.errno, e.strerror) return 0 -#============================================================ +# ============================================================ # Call NCL code here -#============================================================ +# ============================================================ + + if not os.path.exists(os.path.join(os.environ['DATADIR'], 'day')): os.makedirs(os.path.join(os.environ['DATADIR'], 'day')) @@ -52,7 +55,7 @@ def generate_ncl_plots(nclPlotFile): print("MJO spectra") generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_spectra.ncl") -if os.path.isfile( os.environ["WK_DIR"]+"/model/netCDF/MJO_PC_INDEX.nc"): +if os.path.isfile( os.environ["WORK_DIR"]+"/model/netCDF/MJO_PC_INDEX.nc"): print("WARNING: MJO_PC_INDEX.nc already exists. Not re-running.") else: generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_EOF_cal.ncl") diff --git a/diagnostics/MJO_suite/calc_utils.ncl b/diagnostics/MJO_suite/calc_utils.ncl index 22663ddf0..35c75b6ca 100644 --- a/diagnostics/MJO_suite/calc_utils.ncl +++ b/diagnostics/MJO_suite/calc_utils.ncl @@ -13,7 +13,7 @@ load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl" setfileoption("nc", "Format", getenv("MDTF_NC_FORMAT")) -;; Local variables for alll plot routines ;; +;; Local variables for all plot routines ;; pwks = "ps" ; Output format (ps,X11,png,gif) _FillValue = -999 ; Missing value for FLOAT variables. 
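As a usage illustration of the generate_ncl_plots wrapper refactored above: the driver calls it once per NCL script, each call blocks until ncl exits and echoes the script's output, and failures only produce a warning. The loop below is a sketch, although daily_netcdf.ncl and daily_anom.ncl are real scripts in this POD:

    import os

    # generate_ncl_plots is the subprocess wrapper defined in MJO_suite.py above.
    pod_home = os.environ["POD_HOME"]
    for script in ("daily_netcdf.ncl", "daily_anom.ncl"):
        # Shells out to 'ncl <script>'; stdout is echoed by the wrapper, which
        # returns 0 whether or not the NCL script itself succeeded.
        generate_ncl_plots(os.path.join(pod_home, script))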
diff --git a/diagnostics/MJO_suite/daily_anom.ncl b/diagnostics/MJO_suite/daily_anom.ncl index 7803b40d6..5515f784a 100644 --- a/diagnostics/MJO_suite/daily_anom.ncl +++ b/diagnostics/MJO_suite/daily_anom.ncl @@ -16,10 +16,10 @@ time_coord = getenv("time_coord") lat_coord = getenv("lat_coord") lon_coord = getenv("lon_coord") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" datadir = getenv("DATADIR") -yr1 = stringtointeger(getenv("FIRSTYR")) -yr2 = stringtointeger(getenv("LASTYR")) +yr1 = stringtointeger(getenv("startdate")) +yr2 = stringtointeger(getenv("enddate")) start_date = yr1*10000+201 end_date = (yr2+1)*10000+101 diff --git a/diagnostics/MJO_suite/daily_netcdf.ncl b/diagnostics/MJO_suite/daily_netcdf.ncl index f14d41352..d73748968 100644 --- a/diagnostics/MJO_suite/daily_netcdf.ncl +++ b/diagnostics/MJO_suite/daily_netcdf.ncl @@ -29,7 +29,7 @@ debug_print("Starting...",routine_name,debug) casename = getenv("CASENAME") datadir = getenv("DATADIR") level = getenv("lev_coord") -wk_dir = getenv("WK_DIR")+"/model/netCDF/" +wk_dir = getenv("WORK_DIR")+"/model/netCDF/" file_u200 = getenv("U200_FILE") @@ -42,8 +42,8 @@ print("daily_netcdf.ncl reading "+file_pr+" for time coordinates.") print(" Assuming without checking that all have same time coordinates!") f = addfile(file_pr,"r") -yr1 = stringtointeger(getenv("FIRSTYR")) -yr2 = stringtointeger(getenv("LASTYR")) +yr1 = stringtointeger(getenv("startdate")) +yr2 = stringtointeger(getenv("enddate")) lat_coord = getenv("lat_coord") lon_coord = getenv("lon_coord") diff --git a/diagnostics/MJO_suite/mjo_EOF.ncl b/diagnostics/MJO_suite/mjo_EOF.ncl index eff47e947..35bd3a476 100644 --- a/diagnostics/MJO_suite/mjo_EOF.ncl +++ b/diagnostics/MJO_suite/mjo_EOF.ncl @@ -18,7 +18,7 @@ vars = (/"pr","rlut","u200","u850","v200","v850"/) casename = getenv("CASENAME") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" filename_pr = file_dir+casename+".pr.day.anom.nc" filename_rlut = file_dir+casename+".rlut.day.anom.nc" filename_u200 = file_dir+casename+".u200.day.anom.nc" diff --git a/diagnostics/MJO_suite/mjo_EOF_cal.ncl b/diagnostics/MJO_suite/mjo_EOF_cal.ncl index a72fcd3f7..f1280cbde 100644 --- a/diagnostics/MJO_suite/mjo_EOF_cal.ncl +++ b/diagnostics/MJO_suite/mjo_EOF_cal.ncl @@ -13,7 +13,7 @@ begin routine_name = "mjo_EOF_cal.ncl" casename = getenv("CASENAME") - file_dir = getenv("WK_DIR")+"/model/netCDF/" + file_dir = getenv("WORK_DIR")+"/model/netCDF/" neof = 2 diff --git a/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl b/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl index 17a9e83ef..2e6fc79af 100644 --- a/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl +++ b/diagnostics/MJO_suite/mjo_lag_lat_lon.ncl @@ -14,7 +14,7 @@ routine_name = "mjo_lat_lat_lon.ncl" vars = (/"pr","u850"/) casename = getenv("CASENAME") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" filename_pr = file_dir+casename+".pr.day.anom.nc" filename_u850 = file_dir+casename+".u850.day.anom.nc" diff --git a/diagnostics/MJO_suite/mjo_life_cycle.ncl b/diagnostics/MJO_suite/mjo_life_cycle.ncl index 5487a508a..b21bb8dc2 100644 --- a/diagnostics/MJO_suite/mjo_life_cycle.ncl +++ b/diagnostics/MJO_suite/mjo_life_cycle.ncl @@ -14,7 +14,7 @@ begin routine_name = "mjo_life_cycle" casename = getenv("CASENAME") - file_dir = getenv("WK_DIR")+"/model/netCDF/" + file_dir = getenv("WORK_DIR")+"/model/netCDF/" latS = -20 latN = 20 diff --git a/diagnostics/MJO_suite/mjo_spectra.ncl 
b/diagnostics/MJO_suite/mjo_spectra.ncl index 2854ecde8..fdec489cd 100644 --- a/diagnostics/MJO_suite/mjo_spectra.ncl +++ b/diagnostics/MJO_suite/mjo_spectra.ncl @@ -15,7 +15,7 @@ vars = (/"pr","rlut","u200","u850","v200","v850"/) casename = getenv("CASENAME") -file_dir = getenv("WK_DIR")+"/model/netCDF/" +file_dir = getenv("WORK_DIR")+"/model/netCDF/" filename_pr = file_dir+casename+".pr.day.anom.nc" filename_rlut = file_dir+casename+".rlut.day.anom.nc" filename_u200 = file_dir+casename+".u200.day.anom.nc" diff --git a/diagnostics/MJO_suite/settings.jsonc b/diagnostics/MJO_suite/settings.jsonc index aa6397413..c990440cc 100644 --- a/diagnostics/MJO_suite/settings.jsonc +++ b/diagnostics/MJO_suite/settings.jsonc @@ -8,7 +8,7 @@ "settings" : { "driver" : "MJO_suite.py", "long_name" : "MJO diagnostics suite (from AMWG variability diagnostic package)", - "realm" : "atmos", + "convention" : "cesm", "description" : "MJO CLIVAR suite (NCAR)", "runtime_requirements": { "python3": [], @@ -19,8 +19,16 @@ "frequency": "day" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -32,34 +41,40 @@ "varlist": { "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"] }, "pr": { "standard_name": "precipitation_rate", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"] }, "u200": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 200} }, "u850": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 850} }, "v200": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 200} }, "v850": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 850} diff --git a/diagnostics/MJO_teleconnection/MJO_teleconnection.html b/diagnostics/MJO_teleconnection/MJO_teleconnection.html index fe59d06cf..7328516ea 100644 --- a/diagnostics/MJO_teleconnection/MJO_teleconnection.html +++ b/diagnostics/MJO_teleconnection/MJO_teleconnection.html @@ -13,13 +13,13 @@

Phase composites and process-oriented diagnosis of MJO teleconnections

possible poor teleconnections. Performance of the candidate model is assessed relative to a cloud of observations and CMIP5 simulations.

-Full Documentation and Contact Information + + Full Documentation and Contact Information

- -

Phase composites and process oriented diagonosis of MJO teleconnections

+

<font color=navy> Phase composites and process oriented diagnosis of MJO teleconnections


-

Anomalous geopotential height and precipitation phase composites

+

<font color=Blue>Anomalous geopotential height and precipitation phase composites

@@ -41,7 +41,6 @@

Anomalous geopotential height and precipitation

Process oriented diagnostic

- @@ -52,7 +51,4 @@

Process oriented diagnostic

Teleconnection skills v/s Mean state skills plot -
- - diff --git a/diagnostics/MJO_teleconnection/htmls/figure2a.html b/diagnostics/MJO_teleconnection/htmls/figure2a.html index 19ca54d1d..ffa66b708 100644 --- a/diagnostics/MJO_teleconnection/htmls/figure2a.html +++ b/diagnostics/MJO_teleconnection/htmls/figure2a.html @@ -15,24 +15,27 @@ p.ex1 { margin-left: 60px; } - - - - - +
-
-

Teleconnection patterns associated with MJO phase 8

+
+

<font color=navy> Teleconnection patterns associated with MJO phase 8


-
+
-
- +
+ +
+
+

<font color=black> Teleconnection pattern correlation averaged for all MJO phases (y axes) +
relative to the MJO E/W ratio, the dashed line indicates the observed
E/W ratio, + and the open circles represent the poor MJO models. +

+
+
-
-

Teleconnection pattern correlation averaged for all MJO phases (y axes)
relative to the MJO E/W ratio, the dashed line indicates the observed
E/W ratio, and the open circles represent the poor MJO models.

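The cross-model relationship in this caption is summarized by a correlation coefficient whose significance can be checked with the standard t-statistic t = r*sqrt(n-2)/sqrt(1-r^2), the same expression kept as `tstat` in `mjo_diag_fig2_MDTF.ncl` further below. A hedged SciPy sketch; the sample values are illustrative:

```python
# Significance of a Pearson correlation r over n samples:
# t = r*sqrt(n-2)/sqrt(1-r^2), tested against Student's t with n-2 dof.
import numpy as np
from scipy import stats

def corr_tstat(r, n):
    return r * np.sqrt(n - 2.0) / np.sqrt(1.0 - r ** 2)

r, n = 0.6, 12                          # e.g. roughly a dozen models/datasets in the cloud
t = corr_tstat(r, n)
p = 2.0 * stats.t.sf(abs(t), df=n - 2)  # two-sided p-value
print(f"t = {t:.2f}, p = {p:.3f}")      # t = 2.37, p ~ 0.039
```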
-
-
+
+
+ \ No newline at end of file diff --git a/diagnostics/MJO_teleconnection/htmls/figure2b.html b/diagnostics/MJO_teleconnection/htmls/figure2b.html index d311989d3..4ba40da43 100644 --- a/diagnostics/MJO_teleconnection/htmls/figure2b.html +++ b/diagnostics/MJO_teleconnection/htmls/figure2b.html @@ -15,24 +15,27 @@ p.ex1 { margin-left: 40px; } - - - - +
-
-

Teleconnection skills v/s Mean state skills

+
+

<font color=navy> Teleconnection skills v/s Mean state skills


- +
-

Teleconnection pattern correlation averaged for all MJO phases (y axes) relative to the 250-hPa
mean zonal wind RMS error, the plus signs show the model zonal wind RMS error over the full
Pacific basin, while the filled circles indicate the longitudinal RMS error in the region of the sub-
tropical jet. See text (Henderson et al. 2017 J Climate) for more detailed explanations.

+

<font color=black> Teleconnection pattern correlation averaged for all MJO phases + (y axes) relative to the 250-hPa
mean zonal wind RMS error, + the plus signs show the model zonal wind RMS error over the full
Pacific basin, while the filled + circles indicate the longitudinal RMS error in the region of the sub-
tropical jet. + See text (Henderson et al. 2017 J Climate) for more detailed explanations. +

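For orientation, the x-axis metric in this figure is an RMS error of the DJF-mean 250-hPa zonal wind against reanalysis over fixed domains. A minimal sketch of a cos(latitude)-weighted RMS error; the domain bounds and values are illustrative assumptions:

```python
# Hedged sketch: cos(lat)-weighted root-mean-square difference between
# two (lat, lon) fields, as used for the zonal-wind RMS error metric.
import numpy as np

def rms_error(model, obs, lat):
    w = np.cos(np.deg2rad(lat))[:, None] * np.ones_like(model)
    return float(np.sqrt(np.average((model - obs) ** 2, weights=w)))

lat = np.arange(15.0, 62.5, 2.5)        # an illustrative Pacific-jet window
lon = np.arange(120.0, 242.5, 2.5)
u_obs = np.full((lat.size, lon.size), 20.0)   # stand-in reanalysis wind (m/s)
u_model = u_obs + 2.0                          # a uniform 2 m/s bias
print(rms_error(u_model, u_obs, lat))          # 2.0
```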
diff --git a/diagnostics/MJO_teleconnection/htmls/phase1.html b/diagnostics/MJO_teleconnection/htmls/phase1.html index 5f492d500..1c51ee9ee 100644 --- a/diagnostics/MJO_teleconnection/htmls/phase1.html +++ b/diagnostics/MJO_teleconnection/htmls/phase1.html @@ -14,15 +14,21 @@
-

Teleconnection patterns associated with MJO phase 1

+

<font color=navy> Teleconnection patterns associated with MJO phase 1


- +
-

MJO phase 1 pentad composites of anomalous 250-hPa geopotential height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted.
The color shading shows the anomalous tropical precipitation composite during MJO
phase 1.

+

<font color=black> MJO phase 1 pentad composites of anomalous 250-hPa geopotential height, + where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO +
phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
phase 1. +

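As the caption defines it, a pentad is the mean over lags 0-4 days after each day assigned to a given MJO phase. A hedged sketch of that compositing step on synthetic data; the array and index names are illustrative, not the POD's variables:

```python
# Sketch of an MJO phase-pentad composite: average the anomaly field over
# lags 0-4 days following every day classified as the target phase.
import numpy as np

rng = np.random.default_rng(0)
ntime, nlat, nlon = 1000, 30, 72
z250_anom = rng.standard_normal((ntime, nlat, nlon))   # daily height anomalies (illustrative)
phase_index = rng.integers(1, 9, size=ntime)           # an RMM-like phase (1-8) per day

target_phase, nlags = 1, 5
hits = np.where(phase_index == target_phase)[0]
hits = hits[hits <= ntime - nlags]                     # keep only occurrences with a full pentad

# mean over lags 0..4 for each occurrence, then average the occurrences
pentads = np.stack([z250_anom[d:d + nlags].mean(axis=0) for d in hits])
composite = pentads.mean(axis=0)                       # (nlat, nlon) phase composite
print(composite.shape, len(hits), "pentads composited")
```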
diff --git a/diagnostics/MJO_teleconnection/htmls/phase2.html b/diagnostics/MJO_teleconnection/htmls/phase2.html index d24f43b32..f1ff66c0c 100644 --- a/diagnostics/MJO_teleconnection/htmls/phase2.html +++ b/diagnostics/MJO_teleconnection/htmls/phase2.html @@ -1,4 +1,4 @@ - + -
-

Teleconnection patterns associated with MJO phase 3

+

<font color=navy> Teleconnection patterns associated with MJO phase 3


@@ -23,6 +22,13 @@

Teleconnection patterns associat

-

MJO phase 3 pentad composites of anomalous 250-hPa geopotential height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted.
The color shading shows the anomalous tropical precipitation composite during MJO
phase 3.

+

<font color=black> MJO phase 3 pentad composites of anomalous 250-hPa geopotential height, + where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO +
phase. Positive geopotential height anomalies are in red solid contours, and negative +
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
phase 3. +

+ diff --git a/diagnostics/MJO_teleconnection/htmls/phase4.html b/diagnostics/MJO_teleconnection/htmls/phase4.html index a7748f1d0..5ef75d2c0 100644 --- a/diagnostics/MJO_teleconnection/htmls/phase4.html +++ b/diagnostics/MJO_teleconnection/htmls/phase4.html @@ -13,7 +13,7 @@
-

Teleconnection patterns associated with MJO phase 4

+

<font color=navy> Teleconnection patterns associated with MJO phase 4


@@ -23,6 +23,13 @@

Teleconnection patterns associat
-

MJO phase 4 pentad composites of anomalous 250-hPa geopotential height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted.
The color shading shows the anomalous tropical precipitation composite during MJO
phase 4.

+

<font color=black> MJO phase 4 pentad composites of anomalous 250-hPa geopotential + height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following + an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative +
anomalies are in blue dashed contours. Contours are every 10 m, and the zero + contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted. +
The color shading shows the anomalous tropical precipitation composite during MJO +
phase 4. +

diff --git a/diagnostics/MJO_teleconnection/htmls/phase5.html b/diagnostics/MJO_teleconnection/htmls/phase5.html index 2e3205067..2d145c106 100644 --- a/diagnostics/MJO_teleconnection/htmls/phase5.html +++ b/diagnostics/MJO_teleconnection/htmls/phase5.html @@ -3,8 +3,6 @@ - -
-

Teleconnection patterns associated with MJO phase 7

+
+

<font color=navy> Teleconnection patterns associated with MJO phase 7


@@ -26,6 +26,12 @@

Teleconnection patterns associat

-

MJO phase 7 pentad composites of anomalous 250-hPa geopotential height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted.
The color shading shows the anomalous tropical precipitation composite during MJO
phase 7.

+

<font color=black> MJO phase 7 pentad composites of anomalous 250-hPa geopotential height, + where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
+ phase. Positive geopotential height anomalies are in red solid contours, and negative
+ anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
+ is omitted. Anomalies found to be 95% significantly different from zero are dotted.
+ The color shading shows the anomalous tropical precipitation composite during MJO
phase 7. +

diff --git a/diagnostics/MJO_teleconnection/htmls/phase8.html b/diagnostics/MJO_teleconnection/htmls/phase8.html index b1b946e46..201d9a29a 100644 --- a/diagnostics/MJO_teleconnection/htmls/phase8.html +++ b/diagnostics/MJO_teleconnection/htmls/phase8.html @@ -13,7 +13,7 @@
-

Teleconnection patterns associated with MJO phase 8

+

<font color=navy> Teleconnection patterns associated with MJO phase 8


@@ -22,6 +22,12 @@

Teleconnection patterns associat

-

MJO phase 8 pentad composites of anomalous 250-hPa geopotential height, where
a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 following an MJO
phase. Positive geopotential height anomalies are in red solid contours, and negative
anomalies are in blue dashed contours. Contours are every 10 m, and the zero contour
is omitted. Anomalies found to be 95% significantly different from zero are dotted.
The color shading shows the anomalous tropical precipitation composite during MJO
phase 8. +

<font color=black> MJO phase 8 pentad composites of anomalous 250-hPa geopotential height, + where a pentad denotes a 5-lag mean, or the field average of lags 0 - 4 + following an MJO phase. Positive geopotential height anomalies are in red solid contours, + and negative anomalies are in blue dashed contours. Contours are every 10 m, and the zero + contour is omitted. Anomalies found to be 95% significantly different from zero are dotted. + The color shading shows the anomalous tropical precipitation composite during MJO phase 8. +

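Each caption also notes that anomalies "95% significantly different from zero are dotted"; that mask amounts to a pointwise one-sample test on the composite members (the POD itself reads critical values from `t_table_data.txt`, as seen in `mjo_diag_geop_hgt_comp_MDTF.ncl` below). An equivalent hedged SciPy sketch on synthetic data:

```python
# Sketch of the pointwise significance mask: a one-sample t-test on the
# composite members at each grid point, keeping points where the mean
# differs from zero at 95% confidence. Purely illustrative.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
nsamples, nlat, nlon = 40, 30, 72
members = rng.standard_normal((nsamples, nlat, nlon))  # anomaly snapshots in the composite
members[:, 10:15, 20:30] += 1.0                        # a patch with a real nonzero signal

tstat, pval = stats.ttest_1samp(members, popmean=0.0, axis=0)
dotted = pval < 0.05                                   # True where the composite gets "dotted"
print(dotted[10:15, 20:30].mean(), dotted.mean())      # high inside the patch, ~0.05 outside
```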
diff --git a/diagnostics/MJO_teleconnection/mjo_daig_Corr_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_daig_Corr_MDTF.ncl index 25b26a7b2..4f9905b84 100644 --- a/diagnostics/MJO_teleconnection/mjo_daig_Corr_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_daig_Corr_MDTF.ncl @@ -31,7 +31,7 @@ print("------------------------------------------------------------------------- ;--------------------------------------------------------------------------------- print("Reading Geopotential height data for model : "+Model) - diri = getenv("WK_DIR")+"/model/netCDF/" + diri = getenv("WORK_DIR")+"/model/netCDF/" dirobs = getenv("OBS_DATA")+"/" filgeom = "geop_compositesP_hgt250_"+Model+".nc" filgeobs = "geop_compositesP_hgt250.nc" diff --git a/diagnostics/MJO_teleconnection/mjo_diag_EWR_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_EWR_MDTF.ncl index bc5432692..5ed8035b7 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_EWR_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_EWR_MDTF.ncl @@ -39,7 +39,7 @@ print("------------------------------------------------------------------------- print("Reading daily precipitation data") pName = getenv("pr_var") ; mm/day dirobs = getenv("OBS_DATA")+"/" - diro = getenv("WK_DIR")+"/model/netCDF/" + diro = getenv("WORK_DIR")+"/model/netCDF/" filp = getenv("prec_file") filewcmip ="EWratio_wf_CMIP5" print("Input data file observation geop composite :" + filp) diff --git a/diagnostics/MJO_teleconnection/mjo_diag_RMM_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_RMM_MDTF.ncl index 6dbfc3ba5..9c14b85bf 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_RMM_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_RMM_MDTF.ncl @@ -47,7 +47,7 @@ print("Model: " + Model) ;------------ Read in zonal wind and olr -------------------- diri1 = getenv("OBS_DATA")+"/" ;Observed Input data directory -diro = getenv("WK_DIR")+"/model/netCDF/" +diro = getenv("WORK_DIR")+"/model/netCDF/" filolr = getenv("olr_file") ; input file filu850 = getenv("u850_file") ; input file filu250 = getenv("u250_file") ; input file diff --git a/diagnostics/MJO_teleconnection/mjo_diag_U250_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_U250_MDTF.ncl index 3edf464eb..aeaad329c 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_U250_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_U250_MDTF.ncl @@ -45,7 +45,7 @@ print("Reading DJF mean ERA-I(1979-2016) zonal wind data .....") ;--directory for output files-- ;--------------------------------------------------------------------------------- dirobs = getenv("OBS_DATA")+"/" ;Output data directory -diro = getenv("WK_DIR")+"/model/netCDF/" +diro = getenv("WORK_DIR")+"/model/netCDF/" filobsU = "ERAi.1979_2016.DJF.mean.nc" filu250 = getenv("u250_file") ; input file diff --git a/diagnostics/MJO_teleconnection/mjo_diag_fig1_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_fig1_MDTF.ncl index d7134a2b4..2874e353b 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_fig1_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_fig1_MDTF.ncl @@ -41,8 +41,8 @@ print("------------------------------------------------------------------------- ; Input and output data directories ;---------------------------------------------------------------------------------------------------------| dirobs = getenv("OBS_DATA")+"/" -dirm = getenv("WK_DIR")+"/model/netCDF/" -dirfig = getenv("WK_DIR")+"/model/PS/" +dirm = getenv("WORK_DIR")+"/model/netCDF/" +dirfig = getenv("WORK_DIR")+"/model/PS/" print("precipitation and geopotential height composites are read 
in") diff --git a/diagnostics/MJO_teleconnection/mjo_diag_fig2_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_fig2_MDTF.ncl index 8c5ff067e..ccefc358d 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_fig2_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_fig2_MDTF.ncl @@ -34,8 +34,8 @@ pentad = 0 ; Input and output data directories ;---------------------------------------------------------------------------------------------------------| dirobs = getenv("OBS_DATA")+"/" -dirm = getenv("WK_DIR")+"/model/netCDF/" -dirfig = getenv("WK_DIR")+"/model/PS/" +dirm = getenv("WORK_DIR")+"/model/netCDF/" +dirfig = getenv("WORK_DIR")+"/model/PS/" ;============================================================================================== print("read in pattern correlation data") ;---------------------------------------------------------------------------------------------- @@ -57,11 +57,7 @@ print("read in pattern correlation data") print("read in zonal wind RMS error data ") ;---------------------------------------------------------------------------------------------- RMS_U_jet = asciiread(dirm+"U250_RMS_jetext_updated",(/-1/),"float") ;basic state zonal wind corr - RMS_U = asciiread(dirm+ "U250_RMS_updated",(/-1/),"float") - - ;print(model_good) - ;print(RMS_U) -; print(RMS_U_jet) + RMS_U = asciiread(dirm+ "U250_RMS_updated",(/-1/),"float") RMS_U_M = new((/dimsizes(model_good)+1/),typeof(RMS_U)) RMS_U_M(0) = 0.0 @@ -70,7 +66,6 @@ print("read in zonal wind RMS error data ") RMS_U_jet_M = new((/dimsizes(model_good)+1/),typeof(RMS_U)) RMS_U_jet_M(0) = 0.0 RMS_U_jet_M(1:) = RMS_U_jet(model_good) -; print(RMS_U_jet_M) ;-- Make E/W ratio vs teleconnection plot plot - @@ -78,15 +73,11 @@ print("read in zonal wind RMS error data ") ;colors = (/"white","black","grey59","palevioletred1","deepskyblue2","aquamarine4","maroon2","red3","darkorange","blue","darkgreen","purple"/) - wks = gsn_open_wks ("eps",dirfig +"wvEWR_MJOav_250mb" + Domain + "_updated") avdataP = crr_M avdataT = EWR ;E/W ratio - ;print(avdataP) - ;print(avdataT) acr = esccr(avdataP(1:),avdataT(1:),0) - ;print(acr) gsn_define_colormap(wks,"wgne15") ;---------------------------------------------------------------------- @@ -113,12 +104,10 @@ print("read in zonal wind RMS error data ") amaxy = maxy - abs(miny) amaxy = amaxy / 2. -;print(amaxy) amaxy = amaxy + 1.0 aminy = maxy - abs(miny) aminy = aminy / 2. -;print(aminy) aminy = aminy - 1.0 ;---Resources for X/Y axes. @@ -281,19 +270,14 @@ delete(acr) delete(avdataP) - print("250hPa zonal mean wind error plot") print("--------------------------------------------------------------------") mod_goodall = new((/dimsizes(model_good)+1/),integer) mod_goodall(0) = 0 mod_goodall(1:) = model_good + 1 -;print(mod_goodall) avdataP = crr_M(mod_goodall) -;print(dimsizes(avdataP)) -;print(dimsizes(RMS_U_M)) -;print(avdataP) ;print("make BS metric plot") avdataT = new((/2,dimsizes(RMS_U_M)/),typeof(RMS_U_M)) @@ -302,17 +286,11 @@ avdataP = crr_M(mod_goodall) acr = esccr(avdataP(1:),avdataT(0,1:),0) acr2 = esccr(avdataP(1:),avdataT(1,1:),0) - ;print(acr + " " + acr2) - ;t-statistic - ;print("t-statistic") n = dimsizes(avdataP(1:)) ;-1 to exclude reanalysis -; print(n) tstat = acr * sqrt(n-2.)/sqrt(1.-acr^2.) 
- ;print(tstat) - ;-- make plot of BS metrics -- @@ -364,7 +342,6 @@ delete(colors) colModel = (/3,2,4,7,8,11,13,14,15,16,18/) colors = new ((/dimsizes(model_good)+1/),"string") - ;print(colModel(model_good)) colors (0) = "black" colors (1:) = colModel(model_good) @@ -381,10 +358,7 @@ colModel = (/3,2,4,7,8,11,13,14,15,16,18/) res@tiXAxisFontHeightF = 0.0135 - ;GPCP","bcc_csm1_1","CanESM2","CNRM_CM5","GFDL_CM3","GFDL_ESM2G","GFDL_ESM2M","IPSL_CM5A_MR" -;print(avdataT) -;print(avdataP) res@xyMarkers = (/2,2,2,2,2,2,2,2/) plot_o = gsn_csm_xy (wks,transpose((/avdataT(0,:),avdataT(0,:)/)), \ @@ -401,8 +375,6 @@ colModel = (/3,2,4,7,8,11,13,14,15,16,18/) ;---------------------------------------------------------------------- ; Draw some individual labelbars. ;---------------------------------------------------------------------- -;delete(colors1) - labels2 = labels1(model_good) colors2 = colors1(model_good) @@ -434,10 +406,8 @@ do i = 0,dimsizes( labels2)-1 ; Draw some markers and text. ;---------------------------------------------------------------------- - frame(wks) ; Now advance the frame. print("ploting of figures are done") - end diff --git a/diagnostics/MJO_teleconnection/mjo_diag_geop_hgt_comp_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_geop_hgt_comp_MDTF.ncl index aaf98abc6..f949656ef 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_geop_hgt_comp_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_geop_hgt_comp_MDTF.ncl @@ -39,7 +39,7 @@ old_model = True ;------------------------------------------------------------------------------------- ;--directory for output files-- dirobs = getenv("OBS_DATA")+"/" ;Obs input directory -diro = getenv("WK_DIR")+"/model/netCDF/" ;Output data directory +diro = getenv("WORK_DIR")+"/model/netCDF/" ;Output data directory filz250 = getenv("z250_file") ; input file tfile = "t_table_data.txt" print("Output data path :" + diro) diff --git a/diagnostics/MJO_teleconnection/mjo_diag_prec_comp_MDTF.ncl b/diagnostics/MJO_teleconnection/mjo_diag_prec_comp_MDTF.ncl index 735707c8f..5bec4fcf8 100644 --- a/diagnostics/MJO_teleconnection/mjo_diag_prec_comp_MDTF.ncl +++ b/diagnostics/MJO_teleconnection/mjo_diag_prec_comp_MDTF.ncl @@ -37,7 +37,7 @@ print("Model: " + Model) ;--directory for output files-- ;------------------------------------------------------------------------------------- dirobs = getenv("OBS_DATA")+"/" ;Output data directory -diro = getenv("WK_DIR")+"/model/netCDF/" +diro = getenv("WORK_DIR")+"/model/netCDF/" filpr = getenv("prec_file") ; input file print("Output data path :" + diro) diff --git a/diagnostics/MJO_teleconnection/mjo_teleconnection.py b/diagnostics/MJO_teleconnection/mjo_teleconnection.py index 30c335441..a966385ea 100644 --- a/diagnostics/MJO_teleconnection/mjo_teleconnection.py +++ b/diagnostics/MJO_teleconnection/mjo_teleconnection.py @@ -57,15 +57,17 @@ # ========================================================================================================= # OPEN SOURCE COPYRIGHT Agreement TBA # ====================================================================== -#============================================================ +# ============================================================ import os import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# 
============================================================ + + def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -82,7 +84,7 @@ def generate_ncl_plots(nclPlotFile): while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 @@ -92,7 +94,7 @@ def generate_ncl_plots(nclPlotFile): print("-----------------------------------------------------------------------") # create synonyms for env var names to avoid changes to rest of this POD's code os.environ["prec_file"] = os.environ["PR_FILE"] -os.environ["olr_file"] = os.environ["RLUT_FILE"] +os.environ["olr_file"] = os.environ["RLUT_FILE"] os.environ["u850_file"] = os.environ["U850_FILE"] os.environ["u250_file"] = os.environ["U250_FILE"] os.environ["z250_file"] = os.environ["Z250_FILE"] @@ -109,18 +111,18 @@ def generate_ncl_plots(nclPlotFile): print(os.environ["u250_file"]) print(os.environ["z250_file"]) print("-----------------------------------------------------------------------") -#=================================================================================== +# =================================================================================== # Set up directories -#=================================================================================== +# =================================================================================== - if not os.path.exists(os.environ["WK_DIR"]+"/htmls"): - os.makedirs(os.environ["WK_DIR"]+"/htmls") + if not os.path.exists(os.environ["WORK_DIR"]+"/htmls"): + os.makedirs(os.environ["WORK_DIR"]+"/htmls") -#====================================================================================== +# ====================================================================================== # Calling a NCL script to calculate RMM index of a given model data -#====================================================================================== - os.environ["strtdy"] = os.environ["FIRSTYR"]+"0101" - os.environ["lastdy"] = os.environ["LASTYR"] +"1231" +# ====================================================================================== + os.environ["strtdy"] = os.environ["startdate"]+"0101" + os.environ["lastdy"] = os.environ["enddate"] +"1231" os.chdir(os.environ["DATADIR"]) @@ -133,9 +135,7 @@ def generate_ncl_plots(nclPlotFile): generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_diag_fig1_MDTF.ncl") generate_ncl_plots(os.environ["POD_HOME"]+"/mjo_diag_fig2_MDTF.ncl") - - -#============================================================ +# ============================================================ print("-----------------------------------------------------------------------------") print("|----Execution of MJO Teleconnections diagnostics module is completed now----|") print("=============================================================================") diff --git a/diagnostics/MJO_teleconnection/settings.jsonc b/diagnostics/MJO_teleconnection/settings.jsonc index 923d8fd90..b0b296141 100644 --- a/diagnostics/MJO_teleconnection/settings.jsonc +++ b/diagnostics/MJO_teleconnection/settings.jsonc @@ -14,7 +14,7 @@ "settings" : { "driver" : "mjo_teleconnection.py", "long_name" : "MJO teleconnection Diagnostic", - "realm" : "atmos", + "convention" : "cesm", "description": "MJO Teleconnections Diagnostics, see Henderson et al., J. Climate, vol 30, No. 
12, 4567-4587, 2017", "runtime_requirements": { "python3": [], @@ -25,8 +25,16 @@ "frequency": "day" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -38,28 +46,33 @@ "varlist": { "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"] }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"] }, "u250": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 250} }, "u850": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 850} }, "z250": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 250} diff --git a/diagnostics/SM_ET_coupling/SM_ET_coupling.R b/diagnostics/SM_ET_coupling/SM_ET_coupling.R index 1a69f0359..081081c26 100644 --- a/diagnostics/SM_ET_coupling/SM_ET_coupling.R +++ b/diagnostics/SM_ET_coupling/SM_ET_coupling.R @@ -6,12 +6,12 @@ library(fields) library(akima) library(ncdf4) -WK_DIR <- Sys.getenv("WK_DIR") +WK_DIR <- Sys.getenv("WORK_DIR") OBS_DATA <- Sys.getenv("OBS_DATA") DATADIR <- Sys.getenv("DATADIR") CASENAME <- Sys.getenv("CASENAME") -yr1 <- Sys.getenv("FIRSTYR") -yr2 <- Sys.getenv("LASTYR") +yr1 <- Sys.getenv("startdate") +yr2 <- Sys.getenv("enddate") MRSOS_FILE <- Sys.getenv("MRSOS_FILE") EVSPSBL_FILE <- Sys.getenv("EVSPSBL_FILE") PR_FILE <- Sys.getenv("PR_FILE") diff --git a/diagnostics/SM_ET_coupling/SM_ET_coupling.html b/diagnostics/SM_ET_coupling/SM_ET_coupling.html index 53338e7ea..1b6874708 100644 --- a/diagnostics/SM_ET_coupling/SM_ET_coupling.html +++ b/diagnostics/SM_ET_coupling/SM_ET_coupling.html @@ -4,14 +4,24 @@ Soil Moisture-Evapotranspiration coupling

-This module computes the correlation between surface soil moisture (SM; top-10CM) and evapotranspiration (ET), at the interannual time scale, using summertime-mean values (JJA in the Northern Hemisphere, DJF in the Southern Hemisphere). Positive correlations indicate that SM controls ET variations. Negative correlations indicate that ET is energy-limited (by radiation and temperature). -The degree of coupling between SM and ET depends on how wet the local climate is; thus differences in precipitation between model and observations (independent, in first approximation, from the surface) induce differences in SM-ET coupling. Across CMIP5 models, the degree of SM-ET coupling is closely correlated with the amount of summertime rainfall; using this relationship (see Berg and Sheffield 2018, Figure 3), we correct the estimate of SM-ET coupling for the model by accounting for differences in summertime precipitation. In other words, the Pr-corrected estimate of SM-ET coupling is an estimate of the coupling that would hold if precipitation in the model were equal to observed rainfall. + This module computes the correlation between surface soil moisture (SM; top-10CM) and evapotranspiration (ET), + at the interannual time scale, using summertime-mean values (JJA in the Northern Hemisphere, + DJF in the Southern Hemisphere). Positive correlations indicate that SM controls ET variations. + Negative correlations indicate that ET is energy-limited (by radiation and temperature). + The degree of coupling between SM and ET depends on how wet the local climate is; + thus differences in precipitation between model and observations (independent, in first approximation, + from the surface) induce differences in SM-ET coupling. Across CMIP5 models, the degree of SM-ET coupling + is closely correlated with the amount of summertime rainfall; using this relationship + (see Berg and Sheffield 2018, Figure 3), we correct the estimate of SM-ET coupling for the model by accounting + for differences in summertime precipitation. In other words, the Pr-corrected estimate of SM-ET coupling is an + estimate of the coupling that would hold if precipitation in the model were equal to observed rainfall.

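A compact sketch of the metric described above: the per-gridpoint interannual correlation of summertime-mean soil moisture and ET. This is a hedged xarray illustration on synthetic data, not the POD's R implementation:

```python
# Hedged sketch of the SM-ET coupling metric: correlate yearly JJA-mean
# soil moisture and ET at each grid point. Data here are synthetic.
import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
years = np.arange(1980, 2010)
lat = np.arange(-58.75, 60.0, 2.5)
lon = np.arange(0.0, 360.0, 2.5)
shape = (years.size, lat.size, lon.size)
coords = dict(year=years, lat=lat, lon=lon)

# stand-ins for JJA-mean surface soil moisture and ET, one value per year
sm = xr.DataArray(rng.standard_normal(shape), coords=coords, dims=("year", "lat", "lon"))
et = xr.DataArray(0.7 * sm.data + 0.7 * rng.standard_normal(shape),
                  coords=coords, dims=("year", "lat", "lon"))

sm_et_corr = xr.corr(sm, et, dim="year")   # positive: SM-limited; negative: energy-limited
print(float(sm_et_corr.mean()))            # ~0.7 for this soil-moisture-limited synthetic case
```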
-Full Documentation and Contact Information + + Full Documentation and Contact Information

-
Soil Moisture-EvapoTranspiration coupling +<font color=navy> Soil Moisture-EvapoTranspiration coupling {{CASENAME}} OBS (GLEAM) Model-OBS diff --git a/diagnostics/SM_ET_coupling/SM_ET_coupling.py b/diagnostics/SM_ET_coupling/SM_ET_coupling.py index 7af1e0dfe..5d27aadc6 100644 --- a/diagnostics/SM_ET_coupling/SM_ET_coupling.py +++ b/diagnostics/SM_ET_coupling/SM_ET_coupling.py @@ -1,6 +1,6 @@ # This file is part of the SM_ET_coupling module of the MDTF code package (see LICENSE.txt) -#============================================================ +# ============================================================ # Coupling between soil moisture (SM) and evapotranspiration (ET) in summer # Sample code to call R from python # Code written by Alexis Berg @@ -8,16 +8,19 @@ # This module calculates the correlations between SM and ET, as in Berg and Sheffield (2018), Fig.1a. # # Reference: -# Berg and Sheffield (2018), Soil moisture-evapotranspiration coupling in CMIP5 models: relationship with simulated climate and projections, Journal of Climate, 31(12), 4865-4878. -#============================================================ +# Berg and Sheffield (2018), Soil moisture-evapotranspiration coupling in CMIP5 models: +# relationship with simulated climate and projections, Journal of Climate, 31(12), 4865-4878. +# ============================================================ import os import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_R_plots(RPlotFile): """generate_plots_call - call a RPlotFile via subprocess call @@ -28,17 +31,17 @@ def generate_R_plots(RPlotFile): # don't exit if it does not exist, just print a warning.
try: pipe = subprocess.Popen([ - 'Rscript --verbose --vanilla {}'.format(RPlotFile) - ] , shell=True, stdout=subprocess.PIPE) + 'Rscript --verbose --vanilla {}'.format(RPlotFile)], shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].decode() - print('R routine {0} \n {1}'.format(RPlotFile,output)) + print('R routine {0} \n {1}'.format(RPlotFile, output)) while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno ,e.strerror) return 0 + if os.path.isfile(os.environ["MRSOS_FILE"]): print("monthly soil moisture file found") @@ -47,16 +50,12 @@ def generate_R_plots(RPlotFile): print("computing SM-ET coupling") - -#============================================================ +# ============================================================ # Call R code here -#============================================================ +# ============================================================ print("--------- Starting SM_ET coupling generate figures (using R)----------------------------") - if ( True ): + if True: generate_R_plots(os.environ["POD_HOME"]+"/SM_ET_coupling.R") - else: - print("WARNING: For testing purposes, skipping SM_ET coupling figure generation") - print("--------- Finished SM_ET coupling generate figures----------------------------") else: diff --git a/diagnostics/SM_ET_coupling/settings.jsonc b/diagnostics/SM_ET_coupling/settings.jsonc index ce1a2f101..2850b3ff4 100644 --- a/diagnostics/SM_ET_coupling/settings.jsonc +++ b/diagnostics/SM_ET_coupling/settings.jsonc @@ -9,6 +9,7 @@ "driver" : "SM_ET_coupling.py", "long_name" : "Coupling between Soil Moisture and EvapoTranspiration", "realm" : ["atmos", "land"], + "convention" : "cmip", "description" : "Coupling of Soil Moisture with Evapotranspiration", "runtime_requirements": { "python3": [], @@ -19,23 +20,34 @@ "frequency": "mon" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"} }, "varlist" : { "mrsos": { "standard_name": "mass_content_of_water_in_soil_layer", + "realm" : "land", "units": "kg m-2", "dimensions": ["time", "lat", "lon"] }, "evspsbl": { "standard_name": "water_evapotranspiration_flux", + "realm": "land", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"] }, "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"] } diff --git a/diagnostics/TC_MSE/Binning_and_compositing.py b/diagnostics/TC_MSE/Binning_and_compositing.py index ad5db7a60..4a8fda035 100644 --- a/diagnostics/TC_MSE/Binning_and_compositing.py +++ b/diagnostics/TC_MSE/Binning_and_compositing.py @@ -1,54 +1,58 @@ -#Import modules +# Import modules import os import numpy as np import xarray as xr -########## BINNING/COMPOSITING MODEL DATA ############################################################# +# BINNING/COMPOSITING MODEL DATA ############################################################# -######################## MATH FUNCTION(S) ############################################ -def boxavg(thing,lat,lon): - coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)),(len(lon),1))) +# MATH FUNCTION(S) ############################################ + + +def boxavg(thing, lat, lon): + coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)), 
(len(lon), 1))) thing1 = thing*coslat_values thing2 = thing1/thing1 average = np.nansum(np.nansum(thing1,0))/np.nansum(np.nansum(coslat_values*thing2,0)) return average -#Lats/Lons -latres = np.float(os.getenv("latres")) -lonres = np.float(os.getenv("lonres")) -lats = np.arange(-5,5+latres,latres) -lons = np.arange(-5,5+lonres,lonres) -#Gather the years that were inputted by user -FIRST_YR = np.int(os.getenv("FIRSTYR")) -LAST_YR = np.int(os.getenv("LASTYR")) +# Lats/Lons + + +latres = float(os.getenv("latres")) +lonres = float(os.getenv("lonres")) +lats = np.arange(-5, 5+latres, latres) +lons = np.arange(-5, 5+lonres, lonres) +# Gather the years that were inputted by user +FIRST_YR = int(os.getenv("startdate")) +LAST_YR = int(os.getenv("enddate")) ds_all = [] for y in range(FIRST_YR,LAST_YR+1): - #Open all the yearly snapshot (budget and regular variable) files - ds_reg = xr.open_dataset(os.environ['WK_DIR']+'/model/Model_Regular_Variables_'+str(y)+'.nc') - ds_budg = xr.open_dataset(os.environ['WK_DIR']+'/model/Model_Budget_Variables_'+str(y)+'.nc') - #Merge the budget and regular variable files by year + # Open all the yearly snapshot (budget and regular variable) files + ds_reg = xr.open_dataset(os.environ['WORK_DIR'] + '/model/Model_Regular_Variables_' + str(y) + '.nc') + ds_budg = xr.open_dataset(os.environ['WORK_DIR']+'/model/Model_Budget_Variables_' + str(y) + '.nc') + # Merge the budget and regular variable files by year ds_merge = xr.merge([ds_reg,ds_budg]) ds_reg.close() ds_budg.close() - #Get all the merged files together so that once all are collected they can be concatenated + # Get all the merged files together so that once all are collected they can be concatenated ds_all.append(ds_merge) ds_merge.close() -#Concatenate the year files together so all variables are combined across all storms +# Concatenate the year files together so all variables are combined across all storms -data = xr.concat(ds_all,dim='numstorms') +data = xr.concat(ds_all, dim='numstorms') -#Get a list of the data variables in data to trim the data after lifetime maximum intensity (LMI) +# Get a list of the data variables in data to trim the data after lifetime maximum intensity (LMI) Model_vars = list(data.keys()) -#Grab the vmax variable to get the LMI itself and point of LMI for trimming to account only for intensification period +# Grab the vmax variable to get the LMI itself and point of LMI for trimming to account only for intensification period maxwinds = data['maxwind'] winds_list = [] -#Loop through the variables to pick out the feedbacks and add a normalized version of that variable +# Loop through the variables to pick out the feedbacks and add a normalized version of that variable for var in Model_vars: - if(var[0:5]=='hanom' or var[0:10]=='hMoistanom' or var[0:10]=='hTempanom' or var[0:4]=='hvar'): + if var[0:5] == 'hanom' or var[0:10] == 'hMoistanom' or var[0:10] == 'hTempanom' or var[0:4] == 'hvar': normvar = np.array(data[var]) boxavrawvar = np.array(data[var]) boxavvar = np.ones((len(maxwinds),len(maxwinds[0]))) * np.nan @@ -56,115 +60,120 @@ def boxavg(thing,lat,lon): for s in range(len(maxwinds)): for t in range(len(maxwinds[s])): hvar = np.array(data.hvar[s][t][:][:]) - boxavghvar = boxavg(hvar,np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:])) + boxavghvar = boxavg(hvar,np.array(data.latitude[s][t][:]), np.array(data.longitude[s][t][:])) normvar[s][t][:][:] = normvar[s][t][:][:]/boxavghvar - boxavvar[s][t] = 
boxavg(boxavrawvar[s][t][:][:],np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:])) - boxavnormvar[s][t] = boxavg(np.array(normvar[s][t][:][:]),np.array(data.latitude[s][t][:]),np.array(data.longitude[s][t][:])) - data['norm'+var] = (['numstorms','numsteps','latlen','lonlen'],np.array(normvar[:][:][:][:])) - data['boxav_'+var] = (['numstorms','numsteps'],np.array(boxavvar[:][:])) - data['boxav_norm_'+var] = (['numstorms','numsteps'],np.array(boxavnormvar[:][:])) - -#Loop through all model storms and find the LMI, then tag each storm with its LMI for binning later + boxavvar[s][t] = boxavg(boxavrawvar[s][t][:][:], np.array(data.latitude[s][t][:]), + np.array(data.longitude[s][t][:])) + boxavnormvar[s][t] = boxavg(np.array(normvar[s][t][:][:]), np.array(data.latitude[s][t][:]), + np.array(data.longitude[s][t][:])) + data['norm' + var] = (['numstorms', 'numsteps', 'latlen', 'lonlen'], np.array(normvar[:][:][:][:])) + data['boxav_' + var] = (['numstorms', 'numsteps'], np.array(boxavvar[:][:])) + data['boxav_norm_' + var] = (['numstorms', 'numsteps'], np.array(boxavnormvar[:][:])) + +# Loop through all model storms and find the LMI, then tag each storm with its LMI for binning later for s in range(len(maxwinds)): windmax = float(max(maxwinds[s][:])) - windmaxindex = np.squeeze(np.where(maxwinds[s]==windmax)) - #Check if there are more than one maximum wind speed + windmaxindex = np.squeeze(np.where(maxwinds[s] == windmax)) + # Check if there are more than one maximum wind speed if windmaxindex.size >= 2: windmaxindex = int(windmaxindex[0]) else: - windmaxindex = int(np.squeeze(np.where(maxwinds[s]==windmax))) - #Loop and have all the indices after the timestep of LMI be NaN for all vars + windmaxindex = int(np.squeeze(np.where(maxwinds[s] == windmax))) + # Loop and have all the indices after the timestep of LMI be NaN for all vars for var in Model_vars: - data[var][s,windmaxindex+1:len(maxwinds[s])+1] = np.nan + data[var][s, windmaxindex+1:len(maxwinds[s])+1] = np.nan vmax_indiv_list = [] - for t in range(0,len(maxwinds[s])): - #First check and NaN all variables at timesteps where TC center is outside 30 N/S - if(data.centerLat[s][t]>30 or data.centerLat[s][t]<-30): + for t in range(0, len(maxwinds[s])): + # First check and NaN all variables at timesteps where TC center is outside 30 N/S + if data.centerLat[s][t] > 30 or data.centerLat[s][t] < -30: for var in Model_vars: - if(data[var].ndim==2): + if data[var].ndim == 2: data[var][s][t] = np.nan - elif(data[var].ndim==3): + elif data[var].ndim == 3: data[var][s][t][:] = np.nan else: data[var][s][t][:][:] = np.nan - #Get max wind at specific step to tag the steps for binning snapshot + # Get max wind at specific step to tag the steps for binning snapshot vmax_sel = maxwinds[s,t].values - vmax = xr.full_like(data.h[s,t],float(vmax_sel)).rename('vmax') + vmax = xr.full_like(data.h[s, t], float(vmax_sel)).rename('vmax') vmax_indiv_list.append(vmax) - vmax_indiv_array = xr.concat(vmax_indiv_list,dim='numsteps') - #Create the vmax tag variable + vmax_indiv_array = xr.concat(vmax_indiv_list, dim='numsteps') + # Create the vmax tag variable winds_list.append(vmax_indiv_array) -#Update Model data with the vmax tag created above -model_winds_array = xr.concat(winds_list,dim='numstorms') -model_updated = xr.merge([data,model_winds_array]) -#Stretch the boxav variables to 1 dimension and make a new stretched windmax variable +# Update Model data with the vmax tag created above +model_winds_array = xr.concat(winds_list, dim='numstorms') 
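The `boxavg` helper defined at the top of `Binning_and_compositing.py` above is a cos(latitude)-weighted, NaN-aware mean over the storm-centered box. A quick standalone check of that logic on synthetic data (restated in plain NumPy, purely illustrative; a constant field should average to itself):

```python
# Sanity check of the boxavg logic: cos(lat)-weighted mean that skips NaNs.
import numpy as np

def boxavg_check(field, lat, lon):
    w = np.transpose(np.tile(np.cos(np.deg2rad(lat)), (len(lon), 1)))
    valid = np.where(np.isnan(field), np.nan, 1.0)   # mask weights where data is missing
    return np.nansum(field * w) / np.nansum(w * valid)

lat = np.arange(-5.0, 5.25, 0.25)     # the +/-5 degree storm-centered box
lon = np.arange(-5.0, 5.25, 0.25)
field = np.full((lat.size, lon.size), 3.0)
field[10, 10] = np.nan                # a missing point is simply skipped
print(boxavg_check(field, lat, lon))  # ~3.0
```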
+model_updated = xr.merge([data, model_winds_array]) +# Stretch the boxav variables to 1 dimension and make a new stretched windmax variable newvars = list(model_updated.keys()) for var in newvars: - if(var[0:5]=='boxav'): - model_updated['new_'+var] = (['newsteps'],np.squeeze(np.reshape(np.array(model_updated[var]),(len(data.numstorms)*len(data.numsteps))))) + if var[0:5] == 'boxav': + (model_updated)['new_' + var] = (['newsteps'], np.squeeze(np.reshape(np.array(model_updated[var]), + (len(data.numstorms)*len(data.numsteps))))) -model_updated['new_maxwind'] = (['newsteps'],np.squeeze(np.reshape(np.array(model_updated['maxwind']),(len(data.numstorms)*len(data.numsteps))))) +model_updated['new_maxwind'] = (['newsteps'], np.squeeze(np.reshape(np.array(model_updated['maxwind']), + (len(data.numstorms)*len(data.numsteps))))) -#Bin snapshots according to max wind speed bins +# Bin snapshots according to max wind speed bins bins = np.arange(0,66,3) -#Set a count array to gather the sample size for each bin and all bins +# Set a count array to gather the sample size for each bin and all bins count_denom = len(data.latitude[0][0]) * len(data.longitude[0][0]) bins_count = np.zeros(len(bins)) vmax2 = model_updated.vmax.copy(deep=True) onedvmax = model_updated.new_maxwind.copy(deep=True) for b, bin in enumerate(bins): upperbin = bin+3 - #Variable to get the number of samples for the current bin (divide by the resolution dims multiplied together) + # Variable to get the number of samples for the current bin (divide by the resolution dims multiplied together) count = (len(np.where((model_updated.vmax>=bin)&(model_updated.vmax=bin)&(model_updated.vmax=bin)&(model_updated.new_maxwind2): - #Append the bins that are >2 + if (Model_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 Model_bins.append(b) - #Box avgs of variables + # Box avgs of variables Modelboxavgvars.append((Model_mean_binned_data[modelvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - Modelstderrorbars.append((Model_std_binned_data[modelvar].sel(bin=b))*1.96/((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - if(ERAINT_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Gets the range of error bar for 5 to 95% confidence interval at each bin + Modelstderrorbars.append((Model_std_binned_data[modelvar].sel(bin=b)) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + if (ERAINT_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 ERAINT_bins.append(b) - #Box avgs of variables + # Box avgs of variables ERAINTboxavgvars.append((ERAINT_mean_binned_data[reanalysisvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - ERAINTstderrorbars.append((ERAINT_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - if(ERA5_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Gets the range of error bar for 5 to 95% confidence interval at each bin + ERAINTstderrorbars.append((ERAINT_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + if (ERA5_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 ERA5_bins.append(b) - #Box avgs of variables + # Box avgs of variables ERA5boxavgvars.append((ERA5_mean_binned_data[reanalysisvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - 
ERA5stderrorbars.append((ERA5_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - if(CFSR_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Gets the range of error bar for 5 to 95% confidence interval at each bin + ERA5stderrorbars.append((ERA5_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + if (CFSR_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 CFSR_bins.append(b) - #Box avgs of variables + # Box avgs of variables CFSRboxavgvars.append((CFSR_mean_binned_data[reanalysisvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - CFSRstderrorbars.append((CFSR_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - if(MERRA2_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Gets the range of error bar for 5 to 95% confidence interval at each bin + CFSRstderrorbars.append((CFSR_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + if (MERRA2_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 MERRA2_bins.append(b) - #Box avgs of variables + # Box avgs of variables MERRA2boxavgvars.append((MERRA2_mean_binned_data[reanalysisvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - MERRA2stderrorbars.append((MERRA2_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - if(JRA55_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Gets the range of error bar for 5 to 95% confidence interval at each bin + MERRA2stderrorbars.append((MERRA2_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + if (JRA55_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 JRA55_bins.append(b) - #Box avgs of variables + # Box avgs of variables JRA55boxavgvars.append((JRA55_mean_binned_data[reanalysisvar].sel(bin=b))) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - JRA55stderrorbars.append((JRA55_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2))) - - #CFSR - ax.plot(CFSR_bins,CFSRboxavgvars,color='black',label='CFSR', linewidth=4) - ax.errorbar(CFSR_bins,CFSRboxavgvars,yerr=CFSRstderrorbars,color='black', linewidth=4) - #ERA-Int - ax.plot(ERAINT_bins,ERAINTboxavgvars,color='dimgrey',label='ERA-Int', linewidth=4) - ax.errorbar(ERAINT_bins,ERAINTboxavgvars,yerr=ERAINTstderrorbars,color='dimgrey', linewidth=4) - #ERA-5 - ax.plot(ERA5_bins,ERA5boxavgvars,color='grey',label='ERA-5', linewidth=4) - ax.errorbar(ERA5_bins,ERA5boxavgvars,yerr=ERA5stderrorbars,color='grey', linewidth=4) - #JRA-55 - ax.plot(JRA55_bins,JRA55boxavgvars,color='darkgrey',label='JRA-55', linewidth=4) - ax.errorbar(JRA55_bins,JRA55boxavgvars,yerr=JRA55stderrorbars,color='darkgrey', linewidth=4) - #MERRA-2 - ax.plot(MERRA2_bins,MERRA2boxavgvars,color='lightgrey',label='MERRA-2', linewidth=4) - ax.errorbar(MERRA2_bins,MERRA2boxavgvars,yerr=MERRA2stderrorbars,color='lightgrey', linewidth=4) - #Model - ax.plot(Model_bins,Modelboxavgvars,color='red',label=modelname, linewidth=4) - ax.errorbar(Model_bins,Modelboxavgvars,yerr=Modelstderrorbars,color='red', linewidth=4) - #Title and legend - 
ax.legend(fontsize=15,loc='upper left') + # Gets the range of error bar for 5 to 95% confidence interval at each bin + JRA55stderrorbars.append((JRA55_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2))) + + # CFSR + ax.plot(CFSR_bins, CFSRboxavgvars, color='black', label='CFSR', linewidth=4) + ax.errorbar(CFSR_bins, CFSRboxavgvars, yerr=CFSRstderrorbars, color='black', linewidth=4) + # ERA-Int + ax.plot(ERAINT_bins, ERAINTboxavgvars, color='dimgrey', label='ERA-Int', linewidth=4) + ax.errorbar(ERAINT_bins, ERAINTboxavgvars, yerr=ERAINTstderrorbars, color='dimgrey', linewidth=4) + # ERA-5 + ax.plot(ERA5_bins, ERA5boxavgvars, color='grey', label='ERA-5', linewidth=4) + ax.errorbar(ERA5_bins, ERA5boxavgvars, yerr=ERA5stderrorbars, color='grey', linewidth=4) + # JRA-55 + ax.plot(JRA55_bins, JRA55boxavgvars, color='darkgrey', label='JRA-55', linewidth=4) + ax.errorbar(JRA55_bins, JRA55boxavgvars, yerr=JRA55stderrorbars, color='darkgrey', linewidth=4) + # MERRA-2 + ax.plot(MERRA2_bins, MERRA2boxavgvars, color='lightgrey', label='MERRA-2', linewidth=4) + ax.errorbar(MERRA2_bins, MERRA2boxavgvars, yerr=MERRA2stderrorbars, color='lightgrey', linewidth=4) + # Model + ax.plot(Model_bins, Modelboxavgvars, color='red', label=modelname, linewidth=4) + ax.errorbar(Model_bins, Modelboxavgvars, yerr=Modelstderrorbars, color='red', linewidth=4) + # Title and legend + ax.legend(fontsize=15, loc='upper left') ax.set_title(titlevar, fontweight='bold', fontsize=25) - ax.set_ylabel(units,fontweight='bold',fontsize=25) - ax.set_xlabel('Mean Wind Speed [m/s]',fontweight='bold', fontsize=25) - if(titlevar=="Variance of h"): - ax.set_ylim(0,1.5e+15) + ax.set_ylabel(units, fontweight='bold', fontsize=25) + ax.set_xlabel('Mean Wind Speed [m/s]', fontweight='bold', fontsize=25) + if (titlevar == "Variance of h"): + ax.set_ylim(0, 1.5e+15) ax.tick_params(labelsize=25) ax.yaxis.get_offset_text().set_fontsize(25) - #Final plot adjustments - plt.suptitle('Box Average of Bin Composite for Feedback Terms',fontweight='bold',fontsize=40) - plt.subplots_adjust(hspace=0.35,wspace=0.3) - plt.savefig(os.environ['WK_DIR']+'/Box_Average_Plots.pdf') plt.close() - #Now do the normalized version + # Final plot adjustments + plt.suptitle('Box Average of Bin Composite for Feedback Terms', fontweight='bold', fontsize=40) + plt.subplots_adjust(hspace=0.35, wspace=0.3) + plt.savefig(os.environ['WORK_DIR'] + '/Box_Average_Plots.pdf') plt.close() + # Now do the normalized version plt.rcParams["font.weight"] = "bold" plt.rcParams["axes.labelweight"] = "bold" - fig,axs = plt.subplots(ncols=3,figsize=(30,10)) - for r in range(0,3): - ax=axs[r] - if(r==0): - modelvar='new_boxav_norm_hanom_SEFanom' + fig, axs = plt.subplots(ncols=3, figsize=(30, 10)) + for r in range(0, 3): + ax = axs[r] + if (r == 0): + modelvar = 'new_boxav_norm_hanom_SEFanom' reanalysisvar = 'new_boxav_norm_hSEF_concat' titlevar = "h'SEF'" - elif(r==1): - modelvar='new_boxav_norm_hanom_LWanom' + elif (r == 1): + modelvar = 'new_boxav_norm_hanom_LWanom' reanalysisvar = 'new_boxav_norm_hLW_concat' titlevar = "h'LW'" - elif(r==2): - modelvar='new_boxav_norm_hanom_SWanom' + elif (r == 2): + modelvar = 'new_boxav_norm_hanom_SWanom' reanalysisvar = 'new_boxav_norm_hSW_concat' titlevar = "h'SW'" else: @@ -599,7 +752,7 @@ def BoxAvLinePlotting(): CFSR_bins = [] MERRA2_bins = [] JRA55_bins = [] - + Modelboxavgnormvars = [] Modelnormstderrorbars = [] ERAINTboxavgnormvars = [] @@ -614,114 +767,121 @@ 
MERRA2normstderrorbars = [] for b in Model_mean_binned_data.bin[2:]: - if(Model_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + if (Model_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 Model_bins.append(b) - #Box avgs of variables - Modelboxavgnormvars.append((Model_mean_binned_data[modelvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - Modelnormstderrorbars.append((Model_std_binned_data[modelvar].sel(bin=b))*1.96/((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - if(ERAINT_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Box avgs of variables + Modelboxavgnormvars.append((Model_mean_binned_data[modelvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + Modelnormstderrorbars.append((Model_std_binned_data[modelvar].sel(bin=b)) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + if (ERAINT_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 ERAINT_bins.append(b) - #Box avgs of variables - ERAINTboxavgnormvars.append((ERAINT_mean_binned_data[reanalysisvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - ERAINTnormstderrorbars.append((ERAINT_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - if(ERA5_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Box avgs of variables + ERAINTboxavgnormvars.append((ERAINT_mean_binned_data[reanalysisvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + ERAINTnormstderrorbars.append((ERAINT_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + if (ERA5_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 ERA5_bins.append(b) - #Box avgs of variables - ERA5boxavgnormvars.append((ERA5_mean_binned_data[reanalysisvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - ERA5normstderrorbars.append((ERA5_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - if(CFSR_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Box avgs of variables + ERA5boxavgnormvars.append((ERA5_mean_binned_data[reanalysisvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + ERA5normstderrorbars.append((ERA5_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + if (CFSR_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 CFSR_bins.append(b) - #Box avgs of variables - CFSRboxavgnormvars.append((CFSR_mean_binned_data[reanalysisvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - CFSRnormstderrorbars.append((CFSR_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - if(MERRA2_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >2 + # Box avgs of variables + CFSRboxavgnormvars.append((CFSR_mean_binned_data[reanalysisvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + 
CFSRnormstderrorbars.append((CFSR_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + if (MERRA2_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >2 MERRA2_bins.append(b) - #Box avgs of variables - MERRA2boxavgnormvars.append((MERRA2_mean_binned_data[reanalysisvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - MERRA2normstderrorbars.append((MERRA2_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - if(JRA55_mean_binned_data.bincounts.sel(bin=b)>2): - #Append the bins that are >0 + # Box avgs of variables + MERRA2boxavgnormvars.append((MERRA2_mean_binned_data[reanalysisvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + MERRA2normstderrorbars.append((MERRA2_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + if (JRA55_mean_binned_data.bincounts.sel(bin=b) > 2): + # Append the bins that are >0 JRA55_bins.append(b) - #Box avgs of variables - JRA55boxavgnormvars.append((JRA55_mean_binned_data[reanalysisvar].sel(bin=b))*86400) - #Gets the range of error bar for 5 to 95% confidence interval at each bin - JRA55normstderrorbars.append((JRA55_std_binned_data[reanalysisvar].sel(bin=b))*1.96/((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2))*86400) - - #CFSR - ax.plot(CFSR_bins,CFSRboxavgnormvars,color='black',label='CFSR', linewidth=4) - ax.errorbar(CFSR_bins,CFSRboxavgnormvars,yerr=CFSRnormstderrorbars,color='black', linewidth=4) - #ERA-Int - ax.plot(ERAINT_bins,ERAINTboxavgnormvars,color='dimgrey',label='ERA-Int', linewidth=4) - ax.errorbar(ERAINT_bins,ERAINTboxavgnormvars,yerr=ERAINTnormstderrorbars,color='dimgrey', linewidth=4) - #ERA-5 - ax.plot(ERA5_bins,ERA5boxavgnormvars,color='grey',label='ERA-5', linewidth=4) - ax.errorbar(ERA5_bins,ERA5boxavgnormvars,yerr=ERA5normstderrorbars,color='grey', linewidth=4) - #JRA-55 - ax.plot(JRA55_bins,JRA55boxavgnormvars,color='darkgrey',label='JRA-55', linewidth=4) - ax.errorbar(JRA55_bins,JRA55boxavgnormvars,yerr=JRA55normstderrorbars,color='darkgrey', linewidth=4) - #MERRA-2 - ax.plot(MERRA2_bins,MERRA2boxavgnormvars,color='lightgrey',label='MERRA-2', linewidth=4) - ax.errorbar(MERRA2_bins,MERRA2boxavgnormvars,yerr=MERRA2normstderrorbars,color='lightgrey', linewidth=4) - #Model - ax.plot(Model_bins,Modelboxavgnormvars,color='red',label=modelname, linewidth=4) - ax.errorbar(Model_bins,Modelboxavgnormvars,yerr=Modelnormstderrorbars,color='red', linewidth=4) - #Title and legend + # Box avgs of variables + JRA55boxavgnormvars.append((JRA55_mean_binned_data[reanalysisvar].sel(bin=b)) * 86400) + # Gets the range of error bar for 5 to 95% confidence interval at each bin + JRA55normstderrorbars.append((JRA55_std_binned_data[reanalysisvar].sel(bin=b)) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400) + + # CFSR + ax.plot(CFSR_bins, CFSRboxavgnormvars, color='black', label='CFSR', linewidth=4) + ax.errorbar(CFSR_bins, CFSRboxavgnormvars, yerr=CFSRnormstderrorbars, color='black', linewidth=4) + # ERA-Int + ax.plot(ERAINT_bins, ERAINTboxavgnormvars, color='dimgrey', label='ERA-Int', linewidth=4) + ax.errorbar(ERAINT_bins, ERAINTboxavgnormvars, yerr=ERAINTnormstderrorbars, color='dimgrey', linewidth=4) + # ERA-5 + ax.plot(ERA5_bins, ERA5boxavgnormvars, color='grey', label='ERA-5', linewidth=4) + 
ax.errorbar(ERA5_bins, ERA5boxavgnormvars, yerr=ERA5normstderrorbars, color='grey', linewidth=4) + # JRA-55 + ax.plot(JRA55_bins, JRA55boxavgnormvars, color='darkgrey', label='JRA-55', linewidth=4) + ax.errorbar(JRA55_bins, JRA55boxavgnormvars, yerr=JRA55normstderrorbars, color='darkgrey', linewidth=4) + # MERRA-2 + ax.plot(MERRA2_bins, MERRA2boxavgnormvars, color='lightgrey', label='MERRA-2', linewidth=4) + ax.errorbar(MERRA2_bins, MERRA2boxavgnormvars, yerr=MERRA2normstderrorbars, color='lightgrey', linewidth=4) + # Model + ax.plot(Model_bins, Modelboxavgnormvars, color='red', label=modelname, linewidth=4) + ax.errorbar(Model_bins, Modelboxavgnormvars, yerr=Modelnormstderrorbars, color='red', linewidth=4) + # Title and legend ax.legend(fontsize=15) ax.set_title(titlevar, fontweight='bold', fontsize=22) - ax.set_ylabel('Growth Rate [$d^-1$]',fontweight='bold',fontsize=18) - ax.set_xlabel('Mean Wind Speed [m/s]',fontweight='bold', fontsize=18) + ax.set_ylabel('Growth Rate [$d^{-1}$]', fontweight='bold', fontsize=18) + ax.set_xlabel('Mean Wind Speed [m/s]', fontweight='bold', fontsize=18) ax.tick_params(labelsize=18) ax.yaxis.get_offset_text().set_fontsize(18) - #Final plot adjustments - plt.suptitle('Normalized Box Average of Bin Composites for Feedback Terms',fontweight='bold',fontsize=40) - plt.subplots_adjust(hspace=0.35,wspace=0.3) - plt.savefig(os.environ['WK_DIR']+'/Normalized_Box_Average_Plots.pdf') + # Final plot adjustments + plt.suptitle('Normalized Box Average of Bin Composites for Feedback Terms', fontweight='bold', fontsize=40) + plt.subplots_adjust(hspace=0.35, wspace=0.3) + plt.savefig(os.environ['WORK_DIR'] + '/Normalized_Box_Average_Plots.pdf') plt.close() + ########################################### BOX AVERAGE SCATTERING WITH % INTENSIFYING STORMS ################################################### def BoxAvScatter(): - #Marker size - size=150 - #Need to set the different bins to generate all the plots + # Marker size + size = 150 + # Need to set the different bins to generate all the plots allbins = [16.5, 19.5, 22.5, 25.5] - #Get the LMI of each storm for each dataset - Model_LMIs = np.amax(Model_mean_binned_data.maxwind, axis = 1) - ERA5_LMIs = np.amax(ERA5_mean_binned_data.maxwind, axis = 1) - ERAINT_LMIs = np.amax(ERAINT_mean_binned_data.maxwind, axis = 1) - MERRA2_LMIs = np.amax(MERRA2_mean_binned_data.maxwind, axis = 1) - CFSR_LMIs = np.amax(CFSR_mean_binned_data.maxwind, axis = 1) - JRA55_LMIs = np.amax(JRA55_mean_binned_data.maxwind, axis = 1) - #Now set up the plotting settings + # Get the LMI of each storm for each dataset + Model_LMIs = np.amax(Model_mean_binned_data.maxwind, axis=1) + ERA5_LMIs = np.amax(ERA5_mean_binned_data.maxwind, axis=1) + ERAINT_LMIs = np.amax(ERAINT_mean_binned_data.maxwind, axis=1) + MERRA2_LMIs = np.amax(MERRA2_mean_binned_data.maxwind, axis=1) + CFSR_LMIs = np.amax(CFSR_mean_binned_data.maxwind, axis=1) + JRA55_LMIs = np.amax(JRA55_mean_binned_data.maxwind, axis=1) + # Now set up the plotting settings plt.rcParams["font.weight"] = "bold" plt.rcParams["axes.labelweight"] = "bold" - fig,axs = plt.subplots(4,2,figsize=(33,30)) + fig, axs = plt.subplots(4, 2, figsize=(33, 30)) for row in range(4): b = allbins[row] - #Get the vupper and vlower - vupper = b+1.5 - vlower = b-1.5 - #Setting string bin range for main title/save name of plots + # Get the vupper and vlower + vupper = b + 1.5 + vlower = b - 1.5 + # Setting string bin range for main title/save name of plots bup = str(int(vupper)) blow = str(int(vlower)) - #Get the % storms
intensifying based on the bin range of the current row - Model_percent = len(np.where(Model_LMIs>vupper)[0])/len(np.where(Model_LMIs>vlower)[0]) * 100 - ERA5_percent = len(np.where(ERA5_LMIs>vupper)[0])/len(np.where(ERA5_LMIs>vlower)[0]) * 100 - ERAINT_percent = len(np.where(ERAINT_LMIs>vupper)[0])/len(np.where(ERAINT_LMIs>vlower)[0]) * 100 - JRA55_percent = len(np.where(JRA55_LMIs>vupper)[0])/len(np.where(JRA55_LMIs>vlower)[0]) * 100 - CFSR_percent = len(np.where(CFSR_LMIs>vupper)[0])/len(np.where(CFSR_LMIs>vlower)[0]) * 100 - MERRA2_percent = len(np.where(MERRA2_LMIs>vupper)[0])/len(np.where(MERRA2_LMIs>vlower)[0]) * 100 - #Get the box average feedbacks associated with the current bin + # Get the % storms intensifying based on the bin range of the current row + Model_percent = len(np.where(Model_LMIs > vupper)[0]) / len(np.where(Model_LMIs > vlower)[0]) * 100 + ERA5_percent = len(np.where(ERA5_LMIs > vupper)[0]) / len(np.where(ERA5_LMIs > vlower)[0]) * 100 + ERAINT_percent = len(np.where(ERAINT_LMIs > vupper)[0]) / len(np.where(ERAINT_LMIs > vlower)[0]) * 100 + JRA55_percent = len(np.where(JRA55_LMIs > vupper)[0]) / len(np.where(JRA55_LMIs > vlower)[0]) * 100 + CFSR_percent = len(np.where(CFSR_LMIs > vupper)[0]) / len(np.where(CFSR_LMIs > vlower)[0]) * 100 + MERRA2_percent = len(np.where(MERRA2_LMIs > vupper)[0]) / len(np.where(MERRA2_LMIs > vlower)[0]) * 100 + # Get the box average feedbacks associated with the current bin Model_SEF = Model_mean_binned_data['new_boxav_hanom_SEFanom'].sel(bin=b) Model_LW = Model_mean_binned_data['new_boxav_hanom_LWanom'].sel(bin=b) Model_SW = Model_mean_binned_data['new_boxav_hanom_SWanom'].sel(bin=b) @@ -745,32 +905,50 @@ def BoxAvScatter(): MERRA2_SEF = MERRA2_mean_binned_data['new_boxav_hSEF_concat'].sel(bin=b) MERRA2_LW = MERRA2_mean_binned_data['new_boxav_hLW_concat'].sel(bin=b) MERRA2_SW = MERRA2_mean_binned_data['new_boxav_hSW_concat'].sel(bin=b) - #Get the errorbars of each box average above - Model_SEF_bars = Model_std_binned_data['new_boxav_hanom_SEFanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - Model_LW_bars = Model_std_binned_data['new_boxav_hanom_LWanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - Model_SW_bars = Model_std_binned_data['new_boxav_hanom_SWanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + # Get the errorbars of each box average above + Model_SEF_bars = Model_std_binned_data['new_boxav_hanom_SEFanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + Model_LW_bars = Model_std_binned_data['new_boxav_hanom_LWanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + Model_SW_bars = Model_std_binned_data['new_boxav_hanom_SWanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - ERA5_SEF_bars = ERA5_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - ERA5_LW_bars = ERA5_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - ERA5_SW_bars = ERA5_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + ERA5_SEF_bars = ERA5_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + ERA5_LW_bars = ERA5_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ( + 
(ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + ERA5_SW_bars = ERA5_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - ERAINT_SEF_bars = ERAINT_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - ERAINT_LW_bars = ERAINT_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - ERAINT_SW_bars = ERAINT_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + ERAINT_SEF_bars = ERAINT_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + ERAINT_LW_bars = ERAINT_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + ERAINT_SW_bars = ERAINT_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - JRA55_SEF_bars = JRA55_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - JRA55_LW_bars = JRA55_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - JRA55_SW_bars = JRA55_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + JRA55_SEF_bars = JRA55_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + JRA55_LW_bars = JRA55_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + JRA55_SW_bars = JRA55_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - CFSR_SEF_bars = CFSR_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - CFSR_LW_bars = CFSR_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - CFSR_SW_bars = CFSR_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + CFSR_SEF_bars = CFSR_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + CFSR_LW_bars = CFSR_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + CFSR_SW_bars = CFSR_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - MERRA2_SEF_bars = MERRA2_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - MERRA2_LW_bars = MERRA2_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) - MERRA2_SW_bars = MERRA2_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) + MERRA2_SEF_bars = MERRA2_std_binned_data['new_boxav_hSEF_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + MERRA2_LW_bars = MERRA2_std_binned_data['new_boxav_hLW_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) + MERRA2_SW_bars = 
MERRA2_std_binned_data['new_boxav_hSW_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) - #Get the normalized box average feedbacks associated with the current bin + # Get the normalized box average feedbacks associated with the current bin Model_normSEF = Model_mean_binned_data['new_boxav_norm_hanom_SEFanom'].sel(bin=b) * 86400 Model_normLW = Model_mean_binned_data['new_boxav_norm_hanom_LWanom'].sel(bin=b) * 86400 Model_normSW = Model_mean_binned_data['new_boxav_norm_hanom_SWanom'].sel(bin=b) * 86400 @@ -794,152 +972,175 @@ def BoxAvScatter(): MERRA2_normSEF = MERRA2_mean_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 86400 MERRA2_normLW = MERRA2_mean_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 86400 MERRA2_normSW = MERRA2_mean_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 86400 - #Get the errorbars of each box average above - Model_normSEF_bars = Model_std_binned_data['new_boxav_norm_hanom_SEFanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - Model_normLW_bars = Model_std_binned_data['new_boxav_norm_hanom_LWanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - Model_normSW_bars = Model_std_binned_data['new_boxav_norm_hanom_SWanom'].sel(bin=b) * 1.96 / ((Model_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + # Get the errorbars of each box average above + Model_normSEF_bars = Model_std_binned_data['new_boxav_norm_hanom_SEFanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + Model_normLW_bars = Model_std_binned_data['new_boxav_norm_hanom_LWanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + Model_normSW_bars = Model_std_binned_data['new_boxav_norm_hanom_SWanom'].sel(bin=b) * 1.96 / ( + (Model_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 - ERA5_normSEF_bars = ERA5_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - ERA5_normLW_bars = ERA5_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - ERA5_normSW_bars = ERA5_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ((ERA5_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + ERA5_normSEF_bars = ERA5_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + ERA5_normLW_bars = ERA5_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + ERA5_normSW_bars = ERA5_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ( + (ERA5_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 - ERAINT_normSEF_bars = ERAINT_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - ERAINT_normLW_bars = ERAINT_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - ERAINT_normSW_bars = ERAINT_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ((ERAINT_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + ERAINT_normSEF_bars = ERAINT_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + ERAINT_normLW_bars = 
ERAINT_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + ERAINT_normSW_bars = ERAINT_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ( + (ERAINT_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 - JRA55_normSEF_bars = JRA55_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - JRA55_normLW_bars = JRA55_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - JRA55_normSW_bars = JRA55_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ((JRA55_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + JRA55_normSEF_bars = JRA55_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + JRA55_normLW_bars = JRA55_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + JRA55_normSW_bars = JRA55_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ( + (JRA55_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 - CFSR_normSEF_bars = CFSR_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - CFSR_normLW_bars = CFSR_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - CFSR_normSW_bars = CFSR_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ((CFSR_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + CFSR_normSEF_bars = CFSR_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + CFSR_normLW_bars = CFSR_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + CFSR_normSW_bars = CFSR_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ( + (CFSR_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 - MERRA2_normSEF_bars = MERRA2_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - MERRA2_normLW_bars = MERRA2_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 - MERRA2_normSW_bars = MERRA2_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ((MERRA2_mean_binned_data.bincounts.sel(bin=b))**(1/2)) * 86400 + MERRA2_normSEF_bars = MERRA2_std_binned_data['new_boxav_norm_hSEF_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + MERRA2_normLW_bars = MERRA2_std_binned_data['new_boxav_norm_hLW_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 + MERRA2_normSW_bars = MERRA2_std_binned_data['new_boxav_norm_hSW_concat'].sel(bin=b) * 1.96 / ( + (MERRA2_mean_binned_data.bincounts.sel(bin=b)) ** (1 / 2)) * 86400 for col in range(2): - ax=axs[row,col] - if(col==0): - #Plot the scatter points of non-normalized box average feedbacks and their errorbars - #Scatter for symbol legends - SEFfeedbackleg = ax.scatter(CFSR_SEF,CFSR_percent,marker='*',color='black') - LWfeedbackleg = ax.scatter(CFSR_LW,CFSR_percent,marker='o', facecolors='white',edgecolor='black') - 
SWfeedbackleg = ax.scatter(CFSR_SW,CFSR_percent,marker='o',color='black') - #CFSR Scattering - ax.scatter(CFSR_SEF,CFSR_percent,marker='*',color='black',label='CFSR',s=size) - ax.errorbar(CFSR_SEF,CFSR_percent,xerr=CFSR_SEF_bars,fmt='*',color='black') - ax.errorbar(CFSR_LW,CFSR_percent,xerr=CFSR_LW_bars,fmt='-',color='black') - ax.scatter(CFSR_LW,CFSR_percent,marker='o',facecolors='white',edgecolors='black',s=size) - ax.scatter(CFSR_SW,CFSR_percent,marker='o',color='black',s=size) - ax.errorbar(CFSR_SW,CFSR_percent,xerr=CFSR_SW_bars,fmt='o',color='black') - #ERA-Int Scattering - ax.scatter(ERAINT_SEF,ERAINT_percent,marker='*',color='dimgrey',label='ERA-Int',s=size) - ax.errorbar(ERAINT_SEF,ERAINT_percent,xerr=ERAINT_SEF_bars,fmt='*',color='dimgrey') - ax.errorbar(ERAINT_LW,ERAINT_percent,xerr=ERAINT_LW_bars,fmt='-',color='dimgrey') - ax.scatter(ERAINT_LW,ERAINT_percent,marker='o',facecolors='white',edgecolors='dimgrey',s=size) - ax.scatter(ERAINT_SW,ERAINT_percent,marker='o',color='dimgrey',s=size) - ax.errorbar(ERAINT_SW,ERAINT_percent,xerr=ERAINT_SW_bars,fmt='o',color='dimgrey') - #ERA-5 Scattering - ax.scatter(ERA5_SEF,ERA5_percent,marker='*',color='grey',label='ERA-5',s=size) - ax.errorbar(ERA5_SEF,ERA5_percent,xerr=ERA5_SEF_bars,fmt='*',color='grey') - ax.errorbar(ERA5_LW,ERA5_percent,xerr=ERA5_LW_bars,fmt='-',color='grey') - ax.scatter(ERA5_LW,ERA5_percent,marker='o',facecolors='white',edgecolors='grey',s=size) - ax.scatter(ERA5_SW,ERA5_percent,marker='o',color='grey',s=size) - ax.errorbar(ERA5_SW,ERA5_percent,xerr=ERA5_SW_bars,fmt='o',color='grey') - #JRA-55 Scattering - ax.scatter(JRA55_SEF,JRA55_percent,marker='*',color='darkgrey',label='JRA-55',s=size) - ax.errorbar(JRA55_SEF,JRA55_percent,xerr=JRA55_SEF_bars,fmt='*',color='darkgrey') - ax.errorbar(JRA55_LW,JRA55_percent,xerr=JRA55_LW_bars,fmt='-',color='darkgrey') - ax.scatter(JRA55_LW,JRA55_percent,marker='o',facecolors='white',edgecolors='darkgrey',s=size) - ax.scatter(JRA55_SW,JRA55_percent,marker='o',color='darkgrey',s=size) - ax.errorbar(JRA55_SW,JRA55_percent,xerr=JRA55_SW_bars,fmt='o',color='darkgrey') - #MERRA2 Scattering - ax.scatter(MERRA2_SEF,MERRA2_percent,marker='*',color='lightgrey',label='MERRA-2',s=size) - ax.errorbar(MERRA2_SEF,MERRA2_percent,xerr=MERRA2_SEF_bars,fmt='*',color='lightgrey') - ax.errorbar(MERRA2_LW,MERRA2_percent,xerr=MERRA2_LW_bars,fmt='-',color='lightgrey') - ax.scatter(MERRA2_LW,MERRA2_percent,marker='o',facecolors='white',edgecolors='lightgrey',s=size) - ax.scatter(MERRA2_SW,MERRA2_percent,marker='o',color='lightgrey',s=size) - ax.errorbar(MERRA2_SW,MERRA2_percent,xerr=MERRA2_SW_bars,fmt='o',color='lightgrey') - #Model Scattering - ax.scatter(Model_SEF,Model_percent,marker='*',color='red',label=modelname,s=size) - ax.errorbar(Model_SEF,Model_percent,xerr=Model_SEF_bars,fmt='*',color='red') - ax.errorbar(Model_LW,Model_percent,xerr=Model_LW_bars,fmt='-',color='red') - ax.scatter(Model_LW,Model_percent,marker='o',facecolors='white',edgecolors='red',s=size) - ax.scatter(Model_SW,Model_percent,marker='o',color='red',s=size) - ax.errorbar(Model_SW,Model_percent,xerr=Model_SW_bars,fmt='o',color='red') - #Plot Features - ax.set_title('Box Average Feedbacks '+blow+'-'+bup+' m/s Bin',fontweight='bold',fontsize=20) + ax = axs[row, col] + if col == 0: + # Plot the scatter points of non-normalized box average feedbacks and their errorbars + # Scatter for symbol legends + SEFfeedbackleg = ax.scatter(CFSR_SEF, CFSR_percent, marker='*', color='black') + LWfeedbackleg = ax.scatter(CFSR_LW, CFSR_percent, 
marker='o', facecolors='white', edgecolor='black') + SWfeedbackleg = ax.scatter(CFSR_SW, CFSR_percent, marker='o', color='black') + # CFSR Scattering + ax.scatter(CFSR_SEF, CFSR_percent, marker='*', color='black', label='CFSR', s=size) + ax.errorbar(CFSR_SEF, CFSR_percent, xerr=CFSR_SEF_bars, fmt='*', color='black') + ax.errorbar(CFSR_LW, CFSR_percent, xerr=CFSR_LW_bars, fmt='-', color='black') + ax.scatter(CFSR_LW, CFSR_percent, marker='o', facecolors='white', edgecolors='black', s=size) + ax.scatter(CFSR_SW, CFSR_percent, marker='o', color='black', s=size) + ax.errorbar(CFSR_SW, CFSR_percent, xerr=CFSR_SW_bars, fmt='o', color='black') + # ERA-Int Scattering + ax.scatter(ERAINT_SEF, ERAINT_percent, marker='*', color='dimgrey', label='ERA-Int', s=size) + ax.errorbar(ERAINT_SEF, ERAINT_percent, xerr=ERAINT_SEF_bars, fmt='*', color='dimgrey') + ax.errorbar(ERAINT_LW, ERAINT_percent, xerr=ERAINT_LW_bars, fmt='-', color='dimgrey') + ax.scatter(ERAINT_LW, ERAINT_percent, marker='o', facecolors='white', edgecolors='dimgrey', s=size) + ax.scatter(ERAINT_SW, ERAINT_percent, marker='o', color='dimgrey', s=size) + ax.errorbar(ERAINT_SW, ERAINT_percent, xerr=ERAINT_SW_bars, fmt='o', color='dimgrey') + # ERA-5 Scattering + ax.scatter(ERA5_SEF, ERA5_percent, marker='*', color='grey', label='ERA-5', s=size) + ax.errorbar(ERA5_SEF, ERA5_percent, xerr=ERA5_SEF_bars, fmt='*', color='grey') + ax.errorbar(ERA5_LW, ERA5_percent, xerr=ERA5_LW_bars, fmt='-', color='grey') + ax.scatter(ERA5_LW, ERA5_percent, marker='o', facecolors='white', edgecolors='grey', s=size) + ax.scatter(ERA5_SW, ERA5_percent, marker='o', color='grey', s=size) + ax.errorbar(ERA5_SW, ERA5_percent, xerr=ERA5_SW_bars, fmt='o', color='grey') + # JRA-55 Scattering + ax.scatter(JRA55_SEF, JRA55_percent, marker='*', color='darkgrey', label='JRA-55', s=size) + ax.errorbar(JRA55_SEF, JRA55_percent, xerr=JRA55_SEF_bars, fmt='*', color='darkgrey') + ax.errorbar(JRA55_LW, JRA55_percent, xerr=JRA55_LW_bars, fmt='-', color='darkgrey') + ax.scatter(JRA55_LW, JRA55_percent, marker='o', facecolors='white', edgecolors='darkgrey', s=size) + ax.scatter(JRA55_SW, JRA55_percent, marker='o', color='darkgrey', s=size) + ax.errorbar(JRA55_SW, JRA55_percent, xerr=JRA55_SW_bars, fmt='o', color='darkgrey') + # MERRA2 Scattering + ax.scatter(MERRA2_SEF, MERRA2_percent, marker='*', color='lightgrey', label='MERRA-2', s=size) + ax.errorbar(MERRA2_SEF, MERRA2_percent, xerr=MERRA2_SEF_bars, fmt='*', color='lightgrey') + ax.errorbar(MERRA2_LW, MERRA2_percent, xerr=MERRA2_LW_bars, fmt='-', color='lightgrey') + ax.scatter(MERRA2_LW, MERRA2_percent, marker='o', facecolors='white', edgecolors='lightgrey', s=size) + ax.scatter(MERRA2_SW, MERRA2_percent, marker='o', color='lightgrey', s=size) + ax.errorbar(MERRA2_SW, MERRA2_percent, xerr=MERRA2_SW_bars, fmt='o', color='lightgrey') + # Model Scattering + ax.scatter(Model_SEF, Model_percent, marker='*', color='red', label=modelname, s=size) + ax.errorbar(Model_SEF, Model_percent, xerr=Model_SEF_bars, fmt='*', color='red') + ax.errorbar(Model_LW, Model_percent, xerr=Model_LW_bars, fmt='-', color='red') + ax.scatter(Model_LW, Model_percent, marker='o', facecolors='white', edgecolors='red', s=size) + ax.scatter(Model_SW, Model_percent, marker='o', color='red', s=size) + ax.errorbar(Model_SW, Model_percent, xerr=Model_SW_bars, fmt='o', color='red') + # Plot Features + ax.set_title('Box Average Feedbacks ' + blow + '-' + bup + ' m/s Bin', fontweight='bold', fontsize=20) ax.set_xlabel('[$J^2/m^4s$]', fontweight='bold', fontsize=20) 
ax.set_ylabel('Percent of Storms [%]', fontweight='bold', fontsize=20) - leg1 = ax.legend(loc='upper left', bbox_to_anchor=(1, 1),fontsize=16) - leg2 = ax.legend([SEFfeedbackleg,LWfeedbackleg,SWfeedbackleg],["h'SEF'","h'LW'","h'SW'"],loc='center left', bbox_to_anchor=(1, 0.4),fontsize=16) + leg1 = ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=16) + leg2 = ax.legend([SEFfeedbackleg, LWfeedbackleg, SWfeedbackleg], ["h'SEF'", "h'LW'", "h'SW'"], + loc='center left', bbox_to_anchor=(1, 0.4), fontsize=16) ax.add_artist(leg1) ax.add_artist(leg2) ax.tick_params(labelsize=20) ax.xaxis.get_offset_text().set_fontsize(20) - elif(col==1): - #Plot the scatter points of normalized box average feedbacks and their errorbars - #Scatter for symbol legends - SEFfeedbackleg = ax.scatter(CFSR_normSEF,CFSR_percent,marker='*',color='black') - LWfeedbackleg = ax.scatter(CFSR_normLW,CFSR_percent,marker='o', facecolors='white',edgecolor='black') - SWfeedbackleg = ax.scatter(CFSR_normSW,CFSR_percent,marker='o',color='black') - #CFSR Scattering - ax.scatter(CFSR_normSEF,CFSR_percent,marker='*',color='black',label='CFSR',s=size) - ax.errorbar(CFSR_normSEF,CFSR_percent,xerr=CFSR_normSEF_bars,fmt='*',color='black') - ax.errorbar(CFSR_normLW,CFSR_percent,xerr=CFSR_normLW_bars,fmt='-',color='black') - ax.scatter(CFSR_normLW,CFSR_percent,marker='o',facecolors='white',edgecolors='black',s=size) - ax.scatter(CFSR_normSW,CFSR_percent,marker='o',color='black',s=size) - ax.errorbar(CFSR_normSW,CFSR_percent,xerr=CFSR_normSW_bars,fmt='o',color='black') - #ERA-Int Scattering - ax.scatter(ERAINT_normSEF,ERAINT_percent,marker='*',color='dimgrey',label='ERA-Int',s=size) - ax.errorbar(ERAINT_normSEF,ERAINT_percent,xerr=ERAINT_normSEF_bars,fmt='*',color='dimgrey') - ax.errorbar(ERAINT_normLW,ERAINT_percent,xerr=ERAINT_normLW_bars,fmt='-',color='dimgrey') - ax.scatter(ERAINT_normLW,ERAINT_percent,marker='o',facecolors='white',edgecolors='dimgrey',s=size) - ax.scatter(ERAINT_normSW,ERAINT_percent,marker='o',color='dimgrey',s=size) - ax.errorbar(ERAINT_normSW,ERAINT_percent,xerr=ERAINT_normSW_bars,fmt='o',color='dimgrey') - #ERA-5 Scattering - ax.scatter(ERA5_normSEF,ERA5_percent,marker='*',color='grey',label='ERA-5',s=size) - ax.errorbar(ERA5_normSEF,ERA5_percent,xerr=ERA5_normSEF_bars,fmt='*',color='grey') - ax.errorbar(ERA5_normLW,ERA5_percent,xerr=ERA5_normLW_bars,fmt='-',color='grey') - ax.scatter(ERA5_normLW,ERA5_percent,marker='o',facecolors='white',edgecolors='grey',s=size) - ax.scatter(ERA5_normSW,ERA5_percent,marker='o',color='grey',s=size) - ax.errorbar(ERA5_normSW,ERA5_percent,xerr=ERA5_normSW_bars,fmt='o',color='grey') - #JRA-55 Scattering - ax.scatter(JRA55_normSEF,JRA55_percent,marker='*',color='darkgrey',label='JRA-55',s=size) - ax.errorbar(JRA55_normSEF,JRA55_percent,xerr=JRA55_normSEF_bars,fmt='*',color='darkgrey') - ax.errorbar(JRA55_normLW,JRA55_percent,xerr=JRA55_normLW_bars,fmt='-',color='darkgrey') - ax.scatter(JRA55_normLW,JRA55_percent,marker='o',facecolors='white',edgecolors='darkgrey',s=size) - ax.scatter(JRA55_normSW,JRA55_percent,marker='o',color='darkgrey',s=size) - ax.errorbar(JRA55_normSW,JRA55_percent,xerr=JRA55_normSW_bars,fmt='o',color='darkgrey') - #MERRA2 Scattering - ax.scatter(MERRA2_normSEF,MERRA2_percent,marker='*',color='lightgrey',label='MERRA-2',s=size) - ax.errorbar(MERRA2_normSEF,MERRA2_percent,xerr=MERRA2_normSEF_bars,fmt='*',color='lightgrey') - ax.errorbar(MERRA2_normLW,MERRA2_percent,xerr=MERRA2_normLW_bars,fmt='-',color='lightgrey') - 
ax.scatter(MERRA2_normLW,MERRA2_percent,marker='o',facecolors='white',edgecolors='lightgrey',s=size) - ax.scatter(MERRA2_normSW,MERRA2_percent,marker='o',color='lightgrey',s=size) - ax.errorbar(MERRA2_normSW,MERRA2_percent,xerr=MERRA2_normSW_bars,fmt='o',color='lightgrey') - #Model Scattering - ax.scatter(Model_normSEF,Model_percent,marker='*',color='red',label=modelname,s=size) - ax.errorbar(Model_normSEF,Model_percent,xerr=Model_normSEF_bars,fmt='*',color='red') - ax.errorbar(Model_normLW,Model_percent,xerr=Model_normLW_bars,fmt='-',color='red') - ax.scatter(Model_normLW,Model_percent,marker='o',facecolors='white',edgecolors='red',s=size) - ax.scatter(Model_normSW,Model_percent,marker='o',color='red',s=size) - ax.errorbar(Model_normSW,Model_percent,xerr=Model_normSW_bars,fmt='o',color='red') - #Plot Features - ax.set_title('Normalized Box Average Feedbacks '+blow+'-'+bup+' m/s Bin',fontweight='bold',fontsize=20) + elif col == 1: + # Plot the scatter points of normalized box average feedbacks and their errorbars + # Scatter for symbol legends + SEFfeedbackleg = ax.scatter(CFSR_normSEF, CFSR_percent, marker='*', color='black') + LWfeedbackleg = ax.scatter(CFSR_normLW, CFSR_percent, marker='o', facecolors='white', edgecolor='black') + SWfeedbackleg = ax.scatter(CFSR_normSW, CFSR_percent, marker='o', color='black') + # CFSR Scattering + ax.scatter(CFSR_normSEF, CFSR_percent, marker='*', color='black', label='CFSR', s=size) + ax.errorbar(CFSR_normSEF, CFSR_percent, xerr=CFSR_normSEF_bars, fmt='*', color='black') + ax.errorbar(CFSR_normLW, CFSR_percent, xerr=CFSR_normLW_bars, fmt='-', color='black') + ax.scatter(CFSR_normLW, CFSR_percent, marker='o', facecolors='white', edgecolors='black', s=size) + ax.scatter(CFSR_normSW, CFSR_percent, marker='o', color='black', s=size) + ax.errorbar(CFSR_normSW, CFSR_percent, xerr=CFSR_normSW_bars, fmt='o', color='black') + # ERA-Int Scattering + ax.scatter(ERAINT_normSEF, ERAINT_percent, marker='*', color='dimgrey', label='ERA-Int', s=size) + ax.errorbar(ERAINT_normSEF, ERAINT_percent, xerr=ERAINT_normSEF_bars, fmt='*', color='dimgrey') + ax.errorbar(ERAINT_normLW, ERAINT_percent, xerr=ERAINT_normLW_bars, fmt='-', color='dimgrey') + ax.scatter(ERAINT_normLW, ERAINT_percent, marker='o', facecolors='white', edgecolors='dimgrey', s=size) + ax.scatter(ERAINT_normSW, ERAINT_percent, marker='o', color='dimgrey', s=size) + ax.errorbar(ERAINT_normSW, ERAINT_percent, xerr=ERAINT_normSW_bars, fmt='o', color='dimgrey') + # ERA-5 Scattering + ax.scatter(ERA5_normSEF, ERA5_percent, marker='*', color='grey', label='ERA-5', s=size) + ax.errorbar(ERA5_normSEF, ERA5_percent, xerr=ERA5_normSEF_bars, fmt='*', color='grey') + ax.errorbar(ERA5_normLW, ERA5_percent, xerr=ERA5_normLW_bars, fmt='-', color='grey') + ax.scatter(ERA5_normLW, ERA5_percent, marker='o', facecolors='white', edgecolors='grey', s=size) + ax.scatter(ERA5_normSW, ERA5_percent, marker='o', color='grey', s=size) + ax.errorbar(ERA5_normSW, ERA5_percent, xerr=ERA5_normSW_bars, fmt='o', color='grey') + # JRA-55 Scattering + ax.scatter(JRA55_normSEF, JRA55_percent, marker='*', color='darkgrey', label='JRA-55', s=size) + ax.errorbar(JRA55_normSEF, JRA55_percent, xerr=JRA55_normSEF_bars, fmt='*', color='darkgrey') + ax.errorbar(JRA55_normLW, JRA55_percent, xerr=JRA55_normLW_bars, fmt='-', color='darkgrey') + ax.scatter(JRA55_normLW, JRA55_percent, marker='o', facecolors='white', edgecolors='darkgrey', s=size) + ax.scatter(JRA55_normSW, JRA55_percent, marker='o', color='darkgrey', s=size) + ax.errorbar(JRA55_normSW, 
JRA55_percent, xerr=JRA55_normSW_bars, fmt='o', color='darkgrey') + # MERRA2 Scattering + ax.scatter(MERRA2_normSEF, MERRA2_percent, marker='*', color='lightgrey', label='MERRA-2', s=size) + ax.errorbar(MERRA2_normSEF, MERRA2_percent, xerr=MERRA2_normSEF_bars, fmt='*', color='lightgrey') + ax.errorbar(MERRA2_normLW, MERRA2_percent, xerr=MERRA2_normLW_bars, fmt='-', color='lightgrey') + ax.scatter(MERRA2_normLW, MERRA2_percent, marker='o', facecolors='white', edgecolors='lightgrey', + s=size) + ax.scatter(MERRA2_normSW, MERRA2_percent, marker='o', color='lightgrey', s=size) + ax.errorbar(MERRA2_normSW, MERRA2_percent, xerr=MERRA2_normSW_bars, fmt='o', color='lightgrey') + # Model Scattering + ax.scatter(Model_normSEF, Model_percent, marker='*', color='red', label=modelname, s=size) + ax.errorbar(Model_normSEF, Model_percent, xerr=Model_normSEF_bars, fmt='*', color='red') + ax.errorbar(Model_normLW, Model_percent, xerr=Model_normLW_bars, fmt='-', color='red') + ax.scatter(Model_normLW, Model_percent, marker='o', facecolors='white', edgecolors='red', s=size) + ax.scatter(Model_normSW, Model_percent, marker='o', color='red', s=size) + ax.errorbar(Model_normSW, Model_percent, xerr=Model_normSW_bars, fmt='o', color='red') + # Plot Features + ax.set_title('Normalized Box Average Feedbacks ' + blow + '-' + bup + ' m/s Bin', fontweight='bold', + fontsize=20) ax.set_xlabel('[$d^-1$]', fontweight='bold', fontsize=20) ax.set_ylabel('Percent of Storms [%]', fontweight='bold', fontsize=20) - leg1 = ax.legend(loc='upper left', bbox_to_anchor=(1, 1),fontsize=16) - leg2 = ax.legend([SEFfeedbackleg,LWfeedbackleg,SWfeedbackleg],["h'SEF'","h'LW'","h'SW'"],loc='center left', bbox_to_anchor=(1, 0.4),fontsize=16) + leg1 = ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=16) + leg2 = ax.legend([SEFfeedbackleg, LWfeedbackleg, SWfeedbackleg], ["h'SEF'", "h'LW'", "h'SW'"], + loc='center left', bbox_to_anchor=(1, 0.4), fontsize=16) ax.add_artist(leg1) ax.add_artist(leg2) ax.tick_params(labelsize=20) ax.xaxis.get_offset_text().set_fontsize(20) - plt.suptitle('Scatter Plots of Percent of Storms Intensifying as Function of Box-Averaged Feedback', fontweight='bold', fontsize=35) - plt.subplots_adjust(hspace=0.35,wspace=0.3) - plt.savefig(os.environ['WK_DIR']+'/Scattered_Feedbacks.pdf') + plt.suptitle('Scatter Plots of Percent of Storms Intensifying as Function of Box-Averaged Feedback', + fontweight='bold', fontsize=35) + plt.subplots_adjust(hspace=0.35, wspace=0.3) + plt.savefig(os.environ['WORK_DIR'] + '/Scattered_Feedbacks.pdf') plt.close() diff --git a/diagnostics/TC_MSE/TC_MSE.html b/diagnostics/TC_MSE/TC_MSE.html index 24bc7667e..f3a9dba59 100644 --- a/diagnostics/TC_MSE/TC_MSE.html +++ b/diagnostics/TC_MSE/TC_MSE.html @@ -2,11 +2,13 @@

-This POD reads in model track data to create 10 degree by 10 degree snapshots around a tropical cyclone (TC) center. The snapshots are made for all storms between -the years inputted by the user and the column-integrated moist static energy (MSE) budget diabatic terms are calculated for in various ways (azimuthal mean, box-averaged -nromalized). Then, the snapshots are binned based on the maximum velocity at the time of the snapshot and composited by bin. The bin composites are then compared -across 5 reanalysis datasets (CFSR, MERRA2, ERA-Interim, ERA-5, and JRA-55), which have already gone through the aforementioned procedure, by being plotted against -each other in different ways shown below. + This POD reads in model track data to create 10 degree by 10 degree snapshots around a tropical cyclone (TC) center. + The snapshots are made for all storms between the + years input by the user, and the column-integrated moist static energy (MSE) budget diabatic terms are calculated + in various ways (azimuthal mean, box-averaged, normalized). Then, the snapshots are binned based on the maximum + velocity at the time of the snapshot and composited by bin. The bin composites are then compared across 5 + reanalysis datasets (CFSR, MERRA2, ERA-Interim, ERA-5, and JRA-55), which have already gone through the + aforementioned procedure, by being plotted against each other in different ways shown below.

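The compositing and error-bar logic that this description summarizes, and that the plotting code earlier in the diff repeats once per dataset, follows a single pattern: snapshots are grouped by the storm's maximum wind at snapshot time, each bin is composited with a mean, bins with fewer than three members are skipped (the "bincounts > 2" guard), and the plotted error bars are the normal-approximation 95% confidence half-width, 1.96 * std / sqrt(n), with a factor of 86400 s/day applied where a per-second quantity is reported per day. The sketch below is illustrative only, assuming plain NumPy arrays; the names (bin_composite, snapshot_vals, winds) are hypothetical, and the POD itself operates on xarray Datasets indexed by a 'bin' coordinate:

import numpy as np

def bin_composite(snapshot_vals, winds, bin_centers, half_width=1.5, min_count=3):
    # Composite snapshot_vals by wind-speed bin; return the per-bin mean and the
    # 95% confidence half-width, 1.96 * sample std / sqrt(bin count).
    means, halfwidths = [], []
    for b in bin_centers:
        in_bin = (winds >= b - half_width) & (winds < b + half_width)
        n = int(in_bin.sum())
        if n < min_count:  # mirrors the POD's "bincounts > 2" guard
            means.append(np.nan)
            halfwidths.append(np.nan)
            continue
        sample = snapshot_vals[in_bin]
        means.append(sample.mean())
        halfwidths.append(1.96 * sample.std(ddof=1) / np.sqrt(n))
    return np.array(means), np.array(halfwidths)

# Synthetic demo with the same bin centers the scatter plots use; multiply both
# outputs by 86400 when the composited quantity is a per-second rate.
rng = np.random.default_rng(0)
winds = rng.uniform(12.0, 30.0, size=500)             # max wind (m/s) at snapshot time
vals = 0.02 * winds + rng.normal(0.0, 0.1, size=500)  # stand-in feedback term
means, halfwidths = bin_composite(vals, winds, bin_centers=[16.5, 19.5, 22.5, 25.5])

Factoring the arithmetic into one helper along these lines, rather than repeating it for the model and for each of CFSR, ERA-Interim, ERA-5, JRA-55, and MERRA-2, would shorten the plotting module considerably; the diff as written keeps the per-dataset repetition.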
diff --git a/diagnostics/TC_MSE/TC_MSE_Driver.py b/diagnostics/TC_MSE/TC_MSE_Driver.py index e3a4cef4f..2725d94dd 100644 --- a/diagnostics/TC_MSE/TC_MSE_Driver.py +++ b/diagnostics/TC_MSE/TC_MSE_Driver.py @@ -1,27 +1,27 @@ -#Import necessary module(s) +# Import necessary module(s) import os -#Run through the necessary script(s) +# Run through the necessary script(s) -#This will read in track data and create the TC snapshots and calculate MSE variables as well as save TC characteristic variables +# This will read in track data and create the TC snapshots and calculate MSE variables as well as save TC characteristic variables print("Running the TC snapshotting code") print("==============================================") -os.system("python "+ os.environ["POD_HOME"]+"/TC_snapshot_MSE_calc.py") +os.system("python " + os.environ["POD_HOME"] + "/TC_snapshot_MSE_calc.py") print("Snapshotting code has finished!") print("==============================================") -#This will take the files created from above section and bin the variables as well as composite them +# This will take the files created from the above section and bin the variables as well as composite them print("Running the binning and compositing code") print("==============================================") -os.system("python "+ os.environ["POD_HOME"]+"/Binning_and_compositing.py") +os.system("python " + os.environ["POD_HOME"] + "/Binning_and_compositing.py") print("Binning and compositing code has finished!") print("==============================================") -#This will create the various plots which allow for comparison to the 5 reanalysis datasets +# This will create the various plots which allow for comparison to the 5 reanalysis datasets print("Running the plotting code") print("==============================================") @@ -30,5 +30,5 @@ print("Plotting code has finished!") print("==============================================") -#Message noting that the framework has been completed +# Message noting that the framework has been completed print("TC MSE POD has been completed successfully!") diff --git a/diagnostics/TC_MSE/TC_snapshot_MSE_calc.py b/diagnostics/TC_MSE/TC_snapshot_MSE_calc.py index 15bc25c98..1286225fe 100644 --- a/diagnostics/TC_MSE/TC_snapshot_MSE_calc.py +++ b/diagnostics/TC_MSE/TC_snapshot_MSE_calc.py @@ -1,97 +1,106 @@ -#Import modules +# Import modules import os import numpy as np import pandas as pd import xarray as xr -######################## MATH FUNCTION(S) ############################################ -def boxavg(thing,lat,lon): - coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)),(len(lon),1))) - thing1 = thing*coslat_values - thing2 = thing1/thing1 - average = np.nansum(np.nansum(thing1,0))/np.nansum(np.nansum(coslat_values*thing2,0)) + +# ####################### MATH FUNCTION(S) ############################################ +def boxavg(thing, lat, lon): + coslat_values = np.transpose(np.tile(np.cos(np.deg2rad(lat)), (len(lon), 1))) + thing1 = thing * coslat_values + thing2 = thing1 / thing1 + average = np.nansum(np.nansum(thing1, 0)) / np.nansum(np.nansum(coslat_values * thing2, 0)) return average -############################READING IN TRACK DATA###################################################### -#Give the start and end year of track data -start_year = np.int(os.getenv("FIRSTYR")) -end_year = np.int(os.getenv("LASTYR")) -#Input of the specific model's lat/lon resolution or grid spacing (degrees) and name of model being run +# ###########################READING IN TRACK 
DATA###################################################### +# Give the start and end year of track data + + +start_year = int(os.getenv("startdate")) +end_year = int(os.getenv("enddate")) + +# Input of the specific model's lat/lon resolution or grid spacing (degrees) and name of model being run modelname = str(os.getenv("modelname")) -latres = np.float(os.getenv("latres")) -lonres = np.float(os.getenv("lonres")) +latres = float(os.getenv("latres")) +lonres = float(os.getenv("lonres")) -#Getting track data, it is currently set up to read in a .txt file, this function will need to be changed -#if your model track data is not in the same format as this .txt file. +# Getting track data, it is currently set up to read in a .txt file, this function will need to be changed +# if your model track data is not in the same format as this .txt file. trackdata = os.environ["OBS_DATA"] + "/trackdata.txt" -def ReadTrackData(trackdata,start_year,end_year): - df = pd.read_csv(trackdata, sep='\s+', header=None, names=\ - ['lon', 'lat', 'windspeed (m/s)', 'pressure (hPa)', 'year', 'month', 'day', 'hour']) - #Add flag so it knows where to start from - df_starts = df[df['lon']=='start'] - #Start by assigning first storm in dataset with an ID of 1 - storm_id = 1 - for idx, num_steps in zip(df_starts.index, df_starts['lat'].values): - #Add in column for storm ID - df.loc[idx:idx+num_steps+1,'stormid'] = storm_id - #Add 1 to storm ID each time you get to the end of a particular storm track to continue looping - storm_id += 1 - - #Drop the rows that have the starter variable - df = df.dropna().reset_index(drop=True) #only in the rows with start have NaN values, so this works - - #Adjust format of some columns - df.loc[:,'year'] = df.loc[:,'year'].astype(int).astype(str) - df.loc[:,'month'] = df.loc[:,'month'].astype(int).astype(str) - df.loc[:,'day'] = df.loc[:,'day'].astype(int).astype(str) - df.loc[:,'hour'] = df.loc[:,'hour'].astype(int).astype(str) - - #Adjust the times to match CMIP6 time read-in format - df.loc[:,'hour'] = np.where(df['hour'].astype(int)<10,'0'+df['hour'],df['hour']) - df.loc[:,'day'] = np.where(df['day'].astype(int)<10,'0'+df['day'],df['day']) - df.loc[:,'month'] = np.where(df['month'].astype(int)<10,'0'+df['month'],df['month']) - #Create a date stamp column in identical format to cftime conversion to string - df.loc[:,'Modeltime'] = df['year']+'-'+df['month']+'-'+df['day']+' '+df['hour']+':00:00' - - #Find max storm ID number - num_storms = int(max(df.iloc[:]['stormid'])) - - #Creating list of storm IDs by year - tracks_by_year = {year: [] for year in range(start_year,end_year+1)} #empty array of storm tracks by yr - #Loop through all storms - for storm in range(1,num_storms+1): - #Get list of characteristics of storm ID you're on - ds_storm = df[df['stormid']==storm] - #Get years unique to that storm - times = ds_storm['year'].values - if(int(times[0]) < start_year and int(times[-1]) < start_year) or (\ - int(times[0]) > end_year and int(times[-1]) > end_year): - continue - - tracks_by_year[int(times[0])].append(storm) #Append list of storms to start year - - return df, tracks_by_year - -#Gather relevant variables returned from the track data read in function -df, tracks_by_year = ReadTrackData(trackdata,start_year,end_year) -#Create empty list of years that are in tracks_by_years to loop through later -years = [] + +def ReadTrackData(trackdata, start_year, end_year): + df = pd.read_csv(trackdata, sep=r'\s+', header=None, names= + ['lon', 'lat', 'windspeed (m/s)', 'pressure (hPa)',
'year', 'month', 'day', 'hour']) + # Add flag so it knows where to start from + df_starts = df[df['lon'] == 'start'] + # Start by assigning first storm in dataset with an ID of 1 + storm_id = 1 + for idx, num_steps in zip(df_starts.index, df_starts['lat'].values): + # Add in column for storm ID + df.loc[idx:idx + num_steps + 1, 'stormid'] = storm_id + # Add 1 to storm ID each time you get to the end of a particular storm track to continue looping + storm_id += 1 + + # Drop the rows that have the starter variable + df = df.dropna().reset_index(drop=True) # only in the rows with start have NaN values, so this works + + # Adjust format of some columns + df.loc[:, 'year'] = df.loc[:, 'year'].astype(int).astype(str) + df.loc[:, 'month'] = df.loc[:, 'month'].astype(int).astype(str) + df.loc[:, 'day'] = df.loc[:, 'day'].astype(int).astype(str) + df.loc[:, 'hour'] = df.loc[:, 'hour'].astype(int).astype(str) + + # Adjust the times to match CMIP6 time read-in format + df.loc[:, 'hour'] = np.where(df['hour'].astype(int) < 10, '0' + df['hour'], df['hour']) + df.loc[:, 'day'] = np.where(df['day'].astype(int) < 10, '0' + df['day'], df['day']) + df.loc[:, 'month'] = np.where(df['month'].astype(int) < 10, '0' + df['month'], df['month']) + # Create a date stamp column in identical format to cftime conversion to string + df.loc[:, 'Modeltime'] = df['year'] + '-' + df['month'] + '-' + df['day'] + ' ' + df['hour'] + ':00:00' + + # Find max storm ID number + num_storms = int(max(df.iloc[:]['stormid'])) + + # Creating list of storm IDs by year + tracks_by_year = {year: [] for year in range(start_year, end_year + 1)} # empty array of storm tracks by yr + # Loop through all storms + for storm in range(1, num_storms + 1): + # Get list of characteristics of storm ID you're on + ds_storm = df[df['stormid'] == storm] + # Get years unique to that storm + times = ds_storm['year'].values + if (int(times[0]) < start_year and int(times[-1]) < start_year or + int(times[0]) > end_year and int(times[-1]) > end_year): + continue + + tracks_by_year[int(times[0])].append(storm) # Append list of storms to start year + + return df, tracks_by_year + + +# Gather relevant variables returned from the track data read in function +df, tracks_by_year = ReadTrackData(trackdata, start_year, end_year) +# Create empty list of years that are in tracks_by_year to loop through later +years = [] for yr in tracks_by_year: - years.append(yr) + years.append(yr) -################################### READING IN MODEL DATA FROM TRACK DATA###################################################### +# ################################## READING IN MODEL DATA FROM TRACK DATA########################### -#Can open any data variable to do this for the given model as the sample 'ds' dataset has all the same dimensions and spacing +# Can open any data variable to do this for the given model as the sample 'ds' dataset has all the same dimensions +# and spacing ds = xr.open_dataset(os.environ["ta_var"], decode_times=True, use_cftime=True) -#Get a list of times from model data into an indexed list to use for later when pulling track data for a given time of focus +# Get a list of times from model data into an indexed list to use for later when pulling track +# data for a given time of focus tarray = ds.indexes['time'].to_datetimeindex() tarray = tarray.astype(str) itarray = pd.Index(tarray) itlist = itarray.tolist() -#Now gather and put general
lats/lons list into index format to use later for gathering lat/lonbox data for a given time +# Now gather and put general lats/lons list into index format to use later for gathering lat/lonbox data for a given +# time lats = np.array(ds['lat']) lons = np.array(ds['lon']) plevs = np.array(ds['plev']) @@ -101,23 +110,25 @@ def ReadTrackData(trackdata,start_year,end_year): ilatlist = ilats.tolist() ilonlist = ilons.tolist() iplevlist = iplevs.tolist() -#From the track data gather the minimum MSLP for column-integrated MSE -minMSLP = min(df['pressure (hPa)'])*100 +# From the track data gather the minimum MSLP for column-integrated MSE +minMSLP = min(df['pressure (hPa)']) * 100 minplev = ds['plev'].sel(plev=minMSLP, method='nearest') upperlvlplev = min(ds['plev']) iminplev = iplevlist.index(minplev) iupperplev = iplevlist.index(upperlvlplev) -#Now close the sample dataset used for gathering indexed lists of variables +# Now close the sample dataset used for gathering indexed lists of variables ds.close() -#Gather the land-sea mask data and convert the percentages to zeros or NaN's based on if grid point is >20% (can use same lat/lon index lists made above as it is the same as other files) -mask_ds = xr.open_dataset(os.environ["OBS_DATA"]+"/sftlf_fx_GFDL-CM4_amip_r1i1p1f1_gr1.nc", decode_times=True, use_cftime=True) +# Gather the land-sea mask data and convert the percentages to zeros or NaN's based on if grid point is >20% +# (can use same lat/lon index lists made above as it is the same as other files) +mask_ds = xr.open_dataset(os.environ["OBS_DATA"] + "/sftlf_fx_GFDL-CM4_amip_r1i1p1f1_gr1.nc", decode_times=True, + use_cftime=True) lsm = mask_ds.sftlf -#We have our land-sea mask read in at this point, we can close the parent dataset +# We have our land-sea mask read in at this point, we can close the parent dataset mask_ds.close() -#Now open the general variable datasets so they can be pulled from below and only be opened once +# Now open the general variable datasets so they can be pulled from below and only be opened once phi_ds = xr.open_dataset(os.environ["zg_var"], decode_times=True, use_cftime=True) -T_ds = xr.open_dataset(os.environ["ta_var"], decode_times=True, use_cftime = True) -q_ds = xr.open_dataset(os.environ["hus_var"], decode_times=True, use_cftime = True) +T_ds = xr.open_dataset(os.environ["ta_var"], decode_times=True, use_cftime=True) +q_ds = xr.open_dataset(os.environ["hus_var"], decode_times=True, use_cftime=True) hfls_ds = xr.open_dataset(os.environ["hfls_var"], decode_times=True, use_cftime=True) hfss_ds = xr.open_dataset(os.environ["hfss_var"], decode_times=True, use_cftime=True) rlds_ds = xr.open_dataset(os.environ["rlds_var"], decode_times=True, use_cftime=True) @@ -128,411 +139,521 @@ def ReadTrackData(trackdata,start_year,end_year): rsus_ds = xr.open_dataset(os.environ["rsus_var"], decode_times=True, use_cftime=True) rsut_ds = xr.open_dataset(os.environ["rsut_var"], decode_times=True, use_cftime=True) -#Start Looping through the years so we can have a ntecdf file saved per year that has all the data of variables for 10deg lat/lon boxes +# Start looping through the years so we can have a netcdf file saved per year that has all the data of variables +# for 10deg lat/lon boxes for year in years: - #Set up the 4 dimensions of the data
arrays from model data + # Latitude, Longitude amounts to get 10X10 deg box + latlen = int(10 / latres + 1) # Center lat position is one index, then 5 degrees up and 5 degrees down + lonlen = int(10 / lonres + 1) # Center lon position is one index, then 5 degrees left and 5 degrees right + # Get the amount of storms in the year numstorms = len(tracks_by_year[year]) - #Get the maximum track observations across all storms in track data + # Get the maximum track observations across all storms in track data numsteps = max(df['stormid'].value_counts()) - #Create the 4-D arrays for all variables desired - h_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + # Create the 4-D arrays for all variables desired + h_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan + + ClmnLWfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - ClmnLWfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + ClmnSWfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - ClmnSWfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + ClmnRadfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - ClmnRadfluxConv_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + OLR_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - OLR_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistContrib_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistContrib_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempContrib_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempContrib_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hfls_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hfls_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hfss_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hfss_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + sfcMoistEnthalpyFlux_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - sfcMoistEnthalpyFlux_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hvar_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hvar_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistvar_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistvar_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempvar_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempvar_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_hflsanom_save = np.ones((numstorms, 
numsteps, latlen, lonlen)) * np.nan - hanom_hflsanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_hflsanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_hflsanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hMoistanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hMoistanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_LWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_OLRanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_SWanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_RADanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_SEFanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_hflsanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_hflsanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan + hTempanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen)) * np.nan - hTempanom_hfssanom_save = np.ones((numstorms, numsteps, latlen, lonlen))*np.nan - - #Non-Radiative variables (ex: slp, wind, years, months, days, hours, latbox, lonbox, clat, clon, etc.) - #3D variables - latbox_save = np.ones((numstorms, numsteps, latlen))*np.nan + # Non-Radiative variables (ex: slp, wind, years, months, days, hours, latbox, lonbox, clat, clon, etc.) 
+ # 3D variables + latbox_save = np.ones((numstorms, numsteps, latlen)) * np.nan - lonbox_save = np.ones((numstorms, numsteps, lonlen))*np.nan + lonbox_save = np.ones((numstorms, numsteps, lonlen)) * np.nan - #2D variables - maxwind_save = np.ones((numstorms, numsteps))*np.nan + # 2D variables + maxwind_save = np.ones((numstorms, numsteps)) * np.nan - minSLP_save = np.ones((numstorms, numsteps))*np.nan + minSLP_save = np.ones((numstorms, numsteps)) * np.nan - Clat_save = np.ones((numstorms, numsteps))*np.nan + Clat_save = np.ones((numstorms, numsteps)) * np.nan - Clon_save = np.ones((numstorms, numsteps))*np.nan + Clon_save = np.ones((numstorms, numsteps)) * np.nan - year_save = np.ones((numstorms, numsteps))*np.nan + year_save = np.ones((numstorms, numsteps)) * np.nan - month_save = np.ones((numstorms, numsteps))*np.nan + month_save = np.ones((numstorms, numsteps)) * np.nan - day_save = np.ones((numstorms, numsteps))*np.nan + day_save = np.ones((numstorms, numsteps)) * np.nan - hour_save = np.ones((numstorms, numsteps))*np.nan + hour_save = np.ones((numstorms, numsteps)) * np.nan - #Start Looping through the storms in the given year + # Start Looping through the storms in the given year for s, storm in enumerate(tracks_by_year[year]): - #Get the storm data for the storm the index is on - stormdata = df[df['stormid']==storm] - #Get list/arrays of all storm track data for the specific storm + # Get the storm data for the storm the index is on + stormdata = df[df['stormid'] == storm] + # Get list/arrays of all storm track data for the specific storm times = stormdata['Modeltime'].values - clats = np.array(stormdata.loc[:,'lat'].astype(float)) - clons = np.array(stormdata.loc[:,'lon'].astype(float)) - maxwind = np.array(stormdata.loc[:,'windspeed (m/s)']) - minSLP = np.array(stormdata.loc[:,'pressure (hPa)']) - yr = np.array(stormdata.loc[:,'year'].astype(int)) - mo = np.array(stormdata.loc[:,'month'].astype(int)) - d = np.array(stormdata.loc[:,'day'].astype(int)) - hr = np.array(stormdata.loc[:,'hour'].astype(int)) - #Start looping through all the times in the given storm we are on + clats = np.array(stormdata.loc[:, 'lat'].astype(float)) + clons = np.array(stormdata.loc[:, 'lon'].astype(float)) + maxwind = np.array(stormdata.loc[:, 'windspeed (m/s)']) + minSLP = np.array(stormdata.loc[:, 'pressure (hPa)']) + yr = np.array(stormdata.loc[:, 'year'].astype(int)) + mo = np.array(stormdata.loc[:, 'month'].astype(int)) + d = np.array(stormdata.loc[:, 'day'].astype(int)) + hr = np.array(stormdata.loc[:, 'hour'].astype(int)) + # Start looping through all the times in the given storm we are on for t, time in enumerate(times): - #Get time index from the model list of times that matches the track time currently on + # Get time index from the model list of times that matches the track time currently on tind = itlist.index(times[t]) - #Get the clat/clon position that is closest to what is provided in track data + # Get the clat/clon position that is closest to what is provided in track data clat = ds['lat'].sel(lat=clats[t], method='nearest') clon = ds['lon'].sel(lon=clons[t], method='nearest') - #Get the index of the above found clat/clon + # Get the index of the above found clat/clon iclat = ilatlist.index(clat) iclon = ilonlist.index(clon) - #Now set up bounds of 10X10 deg box based on index spacing and must go 1 higher for largest bound - latmax = iclat+int((latlen-1)/2+1) - latmin = iclat-int((latlen-1)/2) - lonmax = iclon+int((lonlen-1)/2+1) - lonmin = iclon-int((lonlen-1)/2) - #Now gather the 
lat/lon array for the box - latbox = np.array(ds.lat.isel(lat=slice(latmin,latmax))) - lonbox = np.array(ds.lon.isel(lon=slice(lonmin,lonmax))) - #Now make a 2D array based on the land-sea mask that is zeros or NaN if >20% - landsea_zerosNaNs = np.zeros((len(latbox),len(lonbox))) - #Open the parent land-sea mask file from outside the loop that is sliced according to the lat/lon bounds above - landsea_sliced = np.squeeze(lsm.isel(lat=slice(latmin,latmax),lon=slice(lonmin,lonmax))) - #Now loop through the sliced land-sea mask to assign NaNs to the grid points that are >20 - for i in range(0,len(latbox)): - for j in range(0,len(lonbox)): - if(landsea_sliced[i][j] > 20): - landsea_zerosNaNs[i][j]=np.nan - - #Getting h data and calculating h - g=9.8 #m/s^2 - Cp=1.00464e3 #J/(kg*K) - Lv=2.501e6 #J/kg - #Getting geopotential - phi = np.squeeze(phi_ds['zg'].isel(time=tind,lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - #Getting temp - T = np.squeeze(T_ds['ta'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - #Getting q - q = np.squeeze(q_ds['hus'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - #Calculate MSE - mse = Cp*T + g*phi + Lv*q - #Calculate MSE (Temperature Contribution) - mseT = Cp*T - #Calculate MSE (Moisture Contribution) - mseMoist = Lv*q - #Get dp and range of p from any of datasets above as they all use same p and indexing - dp = -1*np.diff(phi_ds['plev'].isel(plev=slice(iminplev-1,iupperplev+1))) #To get a positive dp - dptile = np.transpose(np.tile(dp,(mse.shape[1],mse.shape[2],1)),(2,0,1)) - #Do column integration for column-integrated MSE - h = sum(mse[iminplev:iupperplev+1,:,:]*dptile)/g #Column-Integrated MSE - hTempContrib = sum(mseT[iminplev:iupperplev+1,:,:]*dptile)/g #Column-Integrated MSE (Only Temperature Contribution) - hMoistContrib = sum(mseMoist[iminplev:iupperplev+1,:,:]*dptile)/g #Column-Integrated MSE (Only Moisture Contribution) - - #Net LW at sfc regular (rlus-rlds) - rlus = np.squeeze(rlus_ds['rlus'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - rlds = np.squeeze(rlds_ds['rlds'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) + # Now set up bounds of 10X10 deg box based on index spacing and must go 1 higher for largest bound + latmax = iclat + int((latlen - 1) / 2 + 1) + latmin = iclat - int((latlen - 1) / 2) + lonmax = iclon + int((lonlen - 1) / 2 + 1) + lonmin = iclon - int((lonlen - 1) / 2) + # Now gather the lat/lon array for the box + latbox = np.array(ds.lat.isel(lat=slice(latmin, latmax))) + lonbox = np.array(ds.lon.isel(lon=slice(lonmin, lonmax))) + # Now make a 2D array based on the land-sea mask that is zeros or NaN if >20% + landsea_zerosNaNs = np.zeros((len(latbox), len(lonbox))) + # Open the parent land-sea mask file from outside the loop that is sliced according to the + # lat/lon bounds above + landsea_sliced = np.squeeze(lsm.isel(lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + # Now loop through the sliced land-sea mask to assign NaNs to the grid points that are >20 + for i in range(0, len(latbox)): + for j in range(0, len(lonbox)): + if (landsea_sliced[i][j] > 20): + landsea_zerosNaNs[i][j] = np.nan + + # Getting h data and calculating h + g = 9.8 # m/s^2 + Cp = 1.00464e3 # J/(kg*K) + Lv = 2.501e6 # J/kg + # Getting geopotential + phi = np.squeeze(phi_ds['zg'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + # Getting temp + T = np.squeeze(T_ds['ta'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, 
lonmax))) + # Getting q + q = np.squeeze(q_ds['hus'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + # Calculate MSE + mse = Cp * T + g * phi + Lv * q + # Calculate MSE (Temperature Contribution) + mseT = Cp * T + # Calculate MSE (Moisture Contribution) + mseMoist = Lv * q + # Get dp and range of p from any of datasets above as they all use same p and indexing + dp = -1 * np.diff(phi_ds['plev'].isel(plev=slice(iminplev - 1, iupperplev + 1))) # To get a positive dp + dptile = np.transpose(np.tile(dp, (mse.shape[1], mse.shape[2], 1)), (2, 0, 1)) + # Do column integration for column-integrated MSE + h = sum(mse[iminplev:iupperplev + 1, :, :] * dptile) / g # Column-Integrated MSE + hTempContrib = sum(mseT[iminplev:iupperplev + 1, :, + :] * dptile) / g # Column-Integrated MSE (Only Temperature Contribution) + hMoistContrib = sum(mseMoist[iminplev:iupperplev + 1, :, + :] * dptile) / g # Column-Integrated MSE (Only Moisture Contribution) + + # Net LW at sfc regular (rlus-rlds) + rlus = np.squeeze(rlus_ds['rlus'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + rlds = np.squeeze(rlds_ds['rlds'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) netLWsfc = rlus - rlds - #Net SW at sfc regular (rsds - rsus) - rsds = np.squeeze(rsds_ds['rsds'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - rsus = np.squeeze(rsus_ds['rsus'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) + # Net SW at sfc regular (rsds - rsus) + rsds = np.squeeze(rsds_ds['rsds'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + rsus = np.squeeze(rsus_ds['rsus'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) netSWsfc = rsds - rsus - #Net LW at TOA regular (rlut) (no downwelling of LW at TOA) - rlut = np.squeeze(rlut_ds['rlut'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) + # Net LW at TOA regular (rlut) (no downwelling of LW at TOA) + rlut = np.squeeze(rlut_ds['rlut'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) netLWtoa = rlut - #Net SW at TOA regular (rsdt - rsut) (only one downwelling SW at TOA variable, incident) - rsdt = np.squeeze(rsdt_ds['rsdt'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - rsut = np.squeeze(rsut_ds['rsut'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) + # Net SW at TOA regular (rsdt - rsut) (only one downwelling SW at TOA variable, incident) + rsdt = np.squeeze(rsdt_ds['rsdt'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + rsut = np.squeeze(rsut_ds['rsut'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) netSWtoa = rsdt - rsut - #Column LW Flux Convergence regular (netLWsfc - netLWtoa) + # Column LW Flux Convergence regular (netLWsfc - netLWtoa) ClmnLWfluxConv = netLWsfc - netLWtoa - #Column SW Flux Convergence regular (netSWtoa - netSWsfc) + # Column SW Flux Convergence regular (netSWtoa - netSWsfc) ClmnSWfluxConv = netSWtoa - netSWsfc - #Column Radiative Flux Convergence regular (ClmnLWfluxConv + ClmnSWfluxConv) + # Column Radiative Flux Convergence regular (ClmnLWfluxConv + ClmnSWfluxConv) ClmnRadfluxConv = ClmnLWfluxConv + ClmnSWfluxConv - #Surface Moist Enthalpy Flux, sfc upward latent heat flux + sfc upward sensible heat flux (hfls + hfss) - hfls = np.squeeze(hfls_ds['hfls'].isel(time=tind, lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) - hfss = np.squeeze(hfss_ds['hfss'].isel(time=tind, 
lat=slice(latmin,latmax), lon=slice(lonmin,lonmax))) + # Surface Moist Enthalpy Flux, sfc upward latent heat flux + sfc upward sensible heat flux (hfls + hfss) + hfls = np.squeeze(hfls_ds['hfls'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) + hfss = np.squeeze(hfss_ds['hfss'].isel(time=tind, lat=slice(latmin, latmax), lon=slice(lonmin, lonmax))) SfcMoistEnthalpyFlux = hfls + hfss - #Outgoing Longwave Radiation (OLR): + # Outgoing Longwave Radiation (OLR): OLR = rlut - #MSE Budget Variables Calculations + # MSE Budget Variables Calculations havg = boxavg(h, latbox, lonbox) - hanom = h-havg + hanom = h - havg hTempavg = boxavg(hTempContrib, latbox, lonbox) - hTempanom = hTempContrib-hTempavg + hTempanom = hTempContrib - hTempavg hMoistavg = boxavg(hMoistContrib, latbox, lonbox) - hMoistanom = hMoistContrib-hMoistavg + hMoistanom = hMoistContrib - hMoistavg LWavg = boxavg(ClmnLWfluxConv, latbox, lonbox) - LWanom = ClmnLWfluxConv-LWavg + LWanom = ClmnLWfluxConv - LWavg OLRavg = boxavg(OLR, latbox, lonbox) - OLRanom = OLR-OLRavg + OLRanom = OLR - OLRavg SWavg = boxavg(ClmnSWfluxConv, latbox, lonbox) - SWanom = ClmnSWfluxConv-SWavg + SWanom = ClmnSWfluxConv - SWavg RADavg = boxavg(ClmnRadfluxConv, latbox, lonbox) - RADanom = ClmnRadfluxConv-RADavg + RADanom = ClmnRadfluxConv - RADavg SEFavg = boxavg(SfcMoistEnthalpyFlux, latbox, lonbox) - SEFanom = SfcMoistEnthalpyFlux-SEFavg + SEFanom = SfcMoistEnthalpyFlux - SEFavg HFLSavg = boxavg(hfls, latbox, lonbox) - HFLSanom = hfls-HFLSavg + HFLSanom = hfls - HFLSavg HFSSavg = boxavg(hfss, latbox, lonbox) - HFSSanom = hfss-HFSSavg + HFSSanom = hfss - HFSSavg - hvar = np.multiply(np.array(hanom),np.array(hanom)) + hvar = np.multiply(np.array(hanom), np.array(hanom)) - hMoistvar = np.multiply(np.array(hMoistanom),np.array(hMoistanom)) + hMoistvar = np.multiply(np.array(hMoistanom), np.array(hMoistanom)) - hTempvar = np.multiply(np.array(hTempanom),np.array(hTempanom)) + hTempvar = np.multiply(np.array(hTempanom), np.array(hTempanom)) - hanomLWanom = np.multiply(np.array(hanom),np.array(LWanom)) + hanomLWanom = np.multiply(np.array(hanom), np.array(LWanom)) - hanomOLRanom = np.multiply(np.array(hanom),np.array(OLRanom)) + hanomOLRanom = np.multiply(np.array(hanom), np.array(OLRanom)) - hanomSWanom = np.multiply(np.array(hanom),np.array(SWanom)) + hanomSWanom = np.multiply(np.array(hanom), np.array(SWanom)) - hanomRADanom = np.multiply(np.array(hanom),np.array(RADanom)) + hanomRADanom = np.multiply(np.array(hanom), np.array(RADanom)) - hanomSEFanom = np.multiply(np.array(hanom),np.array(SEFanom)) + hanomSEFanom = np.multiply(np.array(hanom), np.array(SEFanom)) - hanomHFLSanom = np.multiply(np.array(hanom),np.array(HFLSanom)) + hanomHFLSanom = np.multiply(np.array(hanom), np.array(HFLSanom)) - hanomHFSSanom = np.multiply(np.array(hanom),np.array(HFSSanom)) + hanomHFSSanom = np.multiply(np.array(hanom), np.array(HFSSanom)) - hMoistanomLWanom = np.multiply(np.array(hMoistanom),np.array(LWanom)) + hMoistanomLWanom = np.multiply(np.array(hMoistanom), np.array(LWanom)) - hMoistanomOLRanom = np.multiply(np.array(hMoistanom),np.array(OLRanom)) + hMoistanomOLRanom = np.multiply(np.array(hMoistanom), np.array(OLRanom)) - hMoistanomSWanom = np.multiply(np.array(hMoistanom),np.array(SWanom)) + hMoistanomSWanom = np.multiply(np.array(hMoistanom), np.array(SWanom)) - hMoistanomRADanom = np.multiply(np.array(hMoistanom),np.array(RADanom)) + hMoistanomRADanom = np.multiply(np.array(hMoistanom), np.array(RADanom)) - hMoistanomSEFanom = 
np.multiply(np.array(hMoistanom),np.array(SEFanom)) + hMoistanomSEFanom = np.multiply(np.array(hMoistanom), np.array(SEFanom)) - hMoistanomHFLSanom = np.multiply(np.array(hMoistanom),np.array(HFLSanom)) + hMoistanomHFLSanom = np.multiply(np.array(hMoistanom), np.array(HFLSanom)) - hMoistanomHFSSanom = np.multiply(np.array(hMoistanom),np.array(HFSSanom)) + hMoistanomHFSSanom = np.multiply(np.array(hMoistanom), np.array(HFSSanom)) - hTempanomLWanom = np.multiply(np.array(hTempanom),np.array(LWanom)) + hTempanomLWanom = np.multiply(np.array(hTempanom), np.array(LWanom)) - hTempanomOLRanom = np.multiply(np.array(hTempanom),np.array(OLRanom)) + hTempanomOLRanom = np.multiply(np.array(hTempanom), np.array(OLRanom)) - hTempanomSWanom = np.multiply(np.array(hTempanom),np.array(SWanom)) + hTempanomSWanom = np.multiply(np.array(hTempanom), np.array(SWanom)) - hTempanomRADanom = np.multiply(np.array(hTempanom),np.array(RADanom)) + hTempanomRADanom = np.multiply(np.array(hTempanom), np.array(RADanom)) - hTempanomSEFanom = np.multiply(np.array(hTempanom),np.array(SEFanom)) + hTempanomSEFanom = np.multiply(np.array(hTempanom), np.array(SEFanom)) - hTempanomHFLSanom = np.multiply(np.array(hTempanom),np.array(HFLSanom)) + hTempanomHFLSanom = np.multiply(np.array(hTempanom), np.array(HFLSanom)) hTempanomHFSSanom = np.multiply(np.array(hTempanom), np.array(HFSSanom)) - #Now save the data variables to its corresponding save name created in outer loop and add the land-sea mask to convert >20% land grids to NaN - #4D Variables - h_save[s,t,0:len(latbox),0:len(lonbox)] = h + landsea_zerosNaNs - hMoistContrib_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistContrib + landsea_zerosNaNs - hTempContrib_save[s,t,0:len(latbox),0:len(lonbox)] = hTempContrib + landsea_zerosNaNs - ClmnLWfluxConv_save[s,t,0:len(latbox),0:len(lonbox)] = ClmnLWfluxConv + landsea_zerosNaNs - ClmnSWfluxConv_save[s,t,0:len(latbox),0:len(lonbox)] = ClmnSWfluxConv + landsea_zerosNaNs - ClmnRadfluxConv_save[s,t,0:len(latbox),0:len(lonbox)] = ClmnRadfluxConv + landsea_zerosNaNs - OLR_save[s,t,0:len(latbox),0:len(lonbox)] = OLR + landsea_zerosNaNs - hfls_save[s,t,0:len(latbox),0:len(lonbox)] = hfls + landsea_zerosNaNs - hfss_save[s,t,0:len(latbox),0:len(lonbox)] = hfss + landsea_zerosNaNs - sfcMoistEnthalpyFlux_save[s,t,0:len(latbox),0:len(lonbox)] = SfcMoistEnthalpyFlux + landsea_zerosNaNs - #MSE Budget Variables - hanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanom + landsea_zerosNaNs - hvar_save[s,t,0:len(latbox),0:len(lonbox)] = hvar + landsea_zerosNaNs - hMoistvar_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistvar + landsea_zerosNaNs - hTempvar_save[s,t,0:len(latbox),0:len(lonbox)] = hTempvar + landsea_zerosNaNs - hanom_LWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomLWanom + landsea_zerosNaNs - hanom_OLRanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomOLRanom + landsea_zerosNaNs - hanom_SWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomSWanom + landsea_zerosNaNs - hanom_RADanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomRADanom + landsea_zerosNaNs - hanom_SEFanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomSEFanom + landsea_zerosNaNs - hanom_hflsanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomHFLSanom + landsea_zerosNaNs - hanom_hfssanom_save[s,t,0:len(latbox),0:len(lonbox)] = hanomHFSSanom + landsea_zerosNaNs - hMoistanom_LWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomLWanom + landsea_zerosNaNs - hMoistanom_OLRanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomOLRanom + landsea_zerosNaNs - 
hMoistanom_SWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomSWanom + landsea_zerosNaNs - hMoistanom_RADanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomRADanom + landsea_zerosNaNs - hMoistanom_SEFanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomSEFanom + landsea_zerosNaNs - hMoistanom_hflsanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomHFLSanom + landsea_zerosNaNs - hMoistanom_hfssanom_save[s,t,0:len(latbox),0:len(lonbox)] = hMoistanomHFSSanom + landsea_zerosNaNs - hTempanom_LWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomLWanom + landsea_zerosNaNs - hTempanom_OLRanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomOLRanom + landsea_zerosNaNs - hTempanom_SWanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomSWanom + landsea_zerosNaNs - hTempanom_RADanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomRADanom + landsea_zerosNaNs - hTempanom_SEFanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomSEFanom + landsea_zerosNaNs - hTempanom_hflsanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomHFLSanom + landsea_zerosNaNs - hTempanom_hfssanom_save[s,t,0:len(latbox),0:len(lonbox)] = hTempanomHFSSanom + landsea_zerosNaNs - - #3D Variables - latbox_save[s,t,0:len(latbox)] = latbox - lonbox_save[s,t,0:len(lonbox)] = lonbox - - #2D Variables - maxwind_save[s,t] = maxwind[t] - minSLP_save[s,t] = minSLP[t] - Clat_save[s,t] = clat - Clon_save[s,t] = clon - year_save[s,t] = yr[t] - month_save[s,t] = mo[t] - day_save[s,t] = d[t] - hour_save[s,t] = hr[t] - -##### Save the variables, regular variables for each year and budget variables for each year + # Now save the data variables to its corresponding save name created in outer loop and add the land-sea mask to convert >20% land grids to NaN + # 4D Variables + h_save[s, t, 0:len(latbox), 0:len(lonbox)] = h + landsea_zerosNaNs + hMoistContrib_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistContrib + landsea_zerosNaNs + hTempContrib_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempContrib + landsea_zerosNaNs + ClmnLWfluxConv_save[s, t, 0:len(latbox), 0:len(lonbox)] = ClmnLWfluxConv + landsea_zerosNaNs + ClmnSWfluxConv_save[s, t, 0:len(latbox), 0:len(lonbox)] = ClmnSWfluxConv + landsea_zerosNaNs + ClmnRadfluxConv_save[s, t, 0:len(latbox), 0:len(lonbox)] = ClmnRadfluxConv + landsea_zerosNaNs + OLR_save[s, t, 0:len(latbox), 0:len(lonbox)] = OLR + landsea_zerosNaNs + hfls_save[s, t, 0:len(latbox), 0:len(lonbox)] = hfls + landsea_zerosNaNs + hfss_save[s, t, 0:len(latbox), 0:len(lonbox)] = hfss + landsea_zerosNaNs + sfcMoistEnthalpyFlux_save[s, t, 0:len(latbox), 0:len(lonbox)] = SfcMoistEnthalpyFlux + landsea_zerosNaNs + # MSE Budget Variables + hanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanom + landsea_zerosNaNs + hvar_save[s, t, 0:len(latbox), 0:len(lonbox)] = hvar + landsea_zerosNaNs + hMoistvar_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistvar + landsea_zerosNaNs + hTempvar_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempvar + landsea_zerosNaNs + hanom_LWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomLWanom + landsea_zerosNaNs + hanom_OLRanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomOLRanom + landsea_zerosNaNs + hanom_SWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomSWanom + landsea_zerosNaNs + hanom_RADanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomRADanom + landsea_zerosNaNs + hanom_SEFanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomSEFanom + landsea_zerosNaNs + hanom_hflsanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hanomHFLSanom + landsea_zerosNaNs + hanom_hfssanom_save[s, 
t, 0:len(latbox), 0:len(lonbox)] = hanomHFSSanom + landsea_zerosNaNs + hMoistanom_LWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomLWanom + landsea_zerosNaNs + hMoistanom_OLRanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomOLRanom + landsea_zerosNaNs + hMoistanom_SWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomSWanom + landsea_zerosNaNs + hMoistanom_RADanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomRADanom + landsea_zerosNaNs + hMoistanom_SEFanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomSEFanom + landsea_zerosNaNs + hMoistanom_hflsanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomHFLSanom + landsea_zerosNaNs + hMoistanom_hfssanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hMoistanomHFSSanom + landsea_zerosNaNs + hTempanom_LWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomLWanom + landsea_zerosNaNs + hTempanom_OLRanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomOLRanom + landsea_zerosNaNs + hTempanom_SWanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomSWanom + landsea_zerosNaNs + hTempanom_RADanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomRADanom + landsea_zerosNaNs + hTempanom_SEFanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomSEFanom + landsea_zerosNaNs + hTempanom_hflsanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomHFLSanom + landsea_zerosNaNs + hTempanom_hfssanom_save[s, t, 0:len(latbox), 0:len(lonbox)] = hTempanomHFSSanom + landsea_zerosNaNs + + # 3D Variables + latbox_save[s, t, 0:len(latbox)] = latbox + lonbox_save[s, t, 0:len(lonbox)] = lonbox + + # 2D Variables + maxwind_save[s, t] = maxwind[t] + minSLP_save[s, t] = minSLP[t] + Clat_save[s, t] = clat + Clon_save[s, t] = clon + year_save[s, t] = yr[t] + month_save[s, t] = mo[t] + day_save[s, t] = d[t] + hour_save[s, t] = hr[t] + + ##### Save the variables, regular variables for each year and budget variables for each year regvars_ds = xr.Dataset( - data_vars = dict( - h=(['numstorms','numsteps','latlen','lonlen'],h_save,{'units':'J/m^2','long_name':'Column-Integrated MSE','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistContrib=(['numstorms','numsteps','latlen','lonlen'],hMoistContrib_save,{'units':'J/m^2','long_name':'Column-Integrated MSE','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempContrib=(['numstorms','numsteps','latlen','lonlen'],hTempContrib_save,{'units':'J/m^2','long_name':'Column-Integrated MSE','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - ClmnLWfluxConv=(['numstorms','numsteps','latlen','lonlen'],ClmnLWfluxConv_save,{'units':'W/m^2','long_name':'Column LW Flux Convergence','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - ClmnSWfluxConv=(['numstorms','numsteps','latlen','lonlen'],ClmnSWfluxConv_save,{'units':'W/m^2','long_name':'Column SW Flux Convergence','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - ClmnRadfluxConv=(['numstorms','numsteps','latlen','lonlen'],ClmnRadfluxConv_save,{'units':'W/m^2','long_name':'Column Radiative Flux Convergence','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - OLR=(['numstorms','numsteps','latlen','lonlen'],OLR_save,{'units':'W/m^2','long_name':'Outgoing LW Radiation','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hfls=(['numstorms','numsteps','latlen','lonlen'],hfls_save,{'units':'W/m^2','long_name':'Surface Upward Latent Heat Flux','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hfss=(['numstorms','numsteps','latlen','lonlen'],hfss_save,{'units':'W/m^2','long_name':'Surface Upward Sensible Heat Flux','_FillValue':-9999,'GridType':'Lat/Lon 
Grid'}), - SEF=(['numstorms','numsteps','latlen','lonlen'],sfcMoistEnthalpyFlux_save,{'units':'W/m^2','long_name':'Surface Moist Enthalpy Flux','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - latitude=(['numstorms','numsteps','latlen'],latbox_save,{'units':'Degrees','long_name':'Latitude','_FillValue':-9999,'GridType':'1.0 deg Latitude Spacing'}), - longitude=(['numstorms','numsteps','lonlen'],lonbox_save,{'units':'Degrees','long_name':'Longitude','_FillValue':-9999,'GridType':'1.25 deg Longitude Spacing'}), - maxwind=(['numstorms','numsteps'],maxwind_save,{'units':'m/s','long_name':'Maximum Wind Speed','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - minSLP=(['numstorms','numsteps'],minSLP_save,{'units':'hPa','long_name':'Minimum Sea Level Pressure','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - centerLat=(['numstorms','numsteps'],Clat_save,{'units':'Degrees','long_name':'TC Center Latitude Position','_FillValue':-9999,'GridType':'1.0 deg Latitude Spacing'}), - centerLon=(['numstorms','numsteps'],Clon_save,{'units':'Degrees','long_name':'TC Center Longitude Position','_FillValue':-9999,'GridType':'1.25 deg Longitude Spacing'}), - year=(['numstorms','numsteps'],year_save,{'units':'Year of given storm','long_name':'year'}), - month=(['numstorms','numsteps'],month_save,{'units':'Month of given storm','long_name':'month'}), - day=(['numstorms','numsteps'],day_save,{'units':'Day of given storm','long_name':'day'}), - hour=(['numstorms','numsteps'],hour_save,{'units':'Hour of given storm','long_name':'hour'}) + data_vars=dict( + h=(['numstorms', 'numsteps', 'latlen', 'lonlen'], h_save, + {'units': 'J/m^2', 'long_name': 'Column-Integrated MSE', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hMoistContrib=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistContrib_save, + {'units': 'J/m^2', 'long_name': 'Column-Integrated MSE', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempContrib=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempContrib_save, + {'units': 'J/m^2', 'long_name': 'Column-Integrated MSE', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + ClmnLWfluxConv=(['numstorms', 'numsteps', 'latlen', 'lonlen'], ClmnLWfluxConv_save, + {'units': 'W/m^2', 'long_name': 'Column LW Flux Convergence', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + ClmnSWfluxConv=(['numstorms', 'numsteps', 'latlen', 'lonlen'], ClmnSWfluxConv_save, + {'units': 'W/m^2', 'long_name': 'Column SW Flux Convergence', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + ClmnRadfluxConv=(['numstorms', 'numsteps', 'latlen', 'lonlen'], ClmnRadfluxConv_save, + {'units': 'W/m^2', 'long_name': 'Column Radiative Flux Convergence', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + OLR=(['numstorms', 'numsteps', 'latlen', 'lonlen'], OLR_save, + {'units': 'W/m^2', 'long_name': 'Outgoing LW Radiation', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hfls=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hfls_save, + {'units': 'W/m^2', 'long_name': 'Surface Upward Latent Heat Flux', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hfss=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hfss_save, + {'units': 'W/m^2', 'long_name': 'Surface Upward Sensible Heat Flux', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + SEF=(['numstorms', 'numsteps', 'latlen', 'lonlen'], sfcMoistEnthalpyFlux_save, + {'units': 'W/m^2', 'long_name': 'Surface Moist Enthalpy Flux', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + latitude=(['numstorms', 'numsteps', 'latlen'], latbox_save, + 
{'units': 'Degrees', 'long_name': 'Latitude', '_FillValue': -9999, + 'GridType': '1.0 deg Latitude Spacing'}), + longitude=(['numstorms', 'numsteps', 'lonlen'], lonbox_save, + {'units': 'Degrees', 'long_name': 'Longitude', '_FillValue': -9999, + 'GridType': '1.25 deg Longitude Spacing'}), + maxwind=(['numstorms', 'numsteps'], maxwind_save, + {'units': 'm/s', 'long_name': 'Maximum Wind Speed', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + minSLP=(['numstorms', 'numsteps'], minSLP_save, + {'units': 'hPa', 'long_name': 'Minimum Sea Level Pressure', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + centerLat=(['numstorms', 'numsteps'], Clat_save, + {'units': 'Degrees', 'long_name': 'TC Center Latitude Position', '_FillValue': -9999, + 'GridType': '1.0 deg Latitude Spacing'}), + centerLon=(['numstorms', 'numsteps'], Clon_save, + {'units': 'Degrees', 'long_name': 'TC Center Longitude Position', '_FillValue': -9999, + 'GridType': '1.25 deg Longitude Spacing'}), + year=(['numstorms', 'numsteps'], year_save, {'units': 'Year of given storm', 'long_name': 'year'}), + month=(['numstorms', 'numsteps'], month_save, {'units': 'Month of given storm', 'long_name': 'month'}), + day=(['numstorms', 'numsteps'], day_save, {'units': 'Day of given storm', 'long_name': 'day'}), + hour=(['numstorms', 'numsteps'], hour_save, {'units': 'Hour of given storm', 'long_name': 'hour'}) ) ) - regvars_ds.to_netcdf(os.environ['WK_DIR']+ '/model/Model_Regular_Variables_'+str(year)+'.nc') + regvars_ds.to_netcdf(os.environ['WORK_DIR'] + '/model/Model_Regular_Variables_' + str(year) + '.nc') regvars_ds.close() budgvars_ds = xr.Dataset( - data_vars = dict( - hanom=(['numstorms','numsteps','latlen','lonlen'],hanom_save,{'units':'J/m^2','long_name':'Column-Integrated MSE Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hvar=(['numstorms','numsteps','latlen','lonlen'],hvar_save,{'units':'J^2*m^-4','long_name':'Variance of Anomaly of Column-Integrated MSE','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistvar=(['numstorms','numsteps','latlen','lonlen'],hMoistvar_save,{'units':'J^2*m^-4','long_name':'Variance of Anomaly of Moist Contribution of h','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempvar=(['numstorms','numsteps','latlen','lonlen'],hTempvar_save,{'units':'J^2*m^-4','long_name':'Variance of Anomaly of Temp Contribution of h','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_LWanom=(['numstorms','numsteps','latlen','lonlen'],hanom_LWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of LW Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_OLRanom=(['numstorms','numsteps','latlen','lonlen'],hanom_OLRanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of OLR Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_SWanom=(['numstorms','numsteps','latlen','lonlen'],hanom_SWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SW Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_RADanom=(['numstorms','numsteps','latlen','lonlen'],hanom_RADanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of RAD Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_SEFanom=(['numstorms','numsteps','latlen','lonlen'],hanom_SEFanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SEF Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_hflsanom=(['numstorms','numsteps','latlen','lonlen'],hanom_hflsanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of
HFLS Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hanom_hfssanom=(['numstorms','numsteps','latlen','lonlen'],hanom_hfssanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of HFSS Anomaly and h Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_LWanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_LWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of LW Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_LWanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_LWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of LW Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_SWanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_SWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SW Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_SWanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_SWanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SW Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_OLRanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_OLRanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of OLR Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_OLRanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_OLRanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of OLR Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_RADanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_RADanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of RAD Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_RADanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_RADanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of RAD Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_SEFanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_SEFanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SEF Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_SEFanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_SEFanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of SEF Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_hflsanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_hflsanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of HFLS Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_hflsanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_hflsanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of HFLS Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hMoistanom_hfssanom=(['numstorms','numsteps','latlen','lonlen'],hMoistanom_hfssanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of HFSS Anomaly and hMoistContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - hTempanom_hfssanom=(['numstorms','numsteps','latlen','lonlen'],hTempanom_hfssanom_save,{'units':'J^2*m^-4*s^-1','long_name':'Product of HFSS Anomaly and hTempContrib Anomaly','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - latitude=(['numstorms','numsteps','latlen'],latbox_save,{'units':'Degrees','long_name':'Latitude','_FillValue':-9999,'GridType':'1.0 deg 
Latitude Spacing'}), - longitude=(['numstorms','numsteps','lonlen'],lonbox_save,{'units':'Degrees','long_name':'Longitude','_FillValue':-9999,'GridType':'1.25 deg Longitude Spacing'}), - maxwind=(['numstorms','numsteps'],maxwind_save,{'units':'m/s','long_name':'Maximum Wind Speed','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - minSLP=(['numstorms','numsteps'],minSLP_save,{'units':'hPa','long_name':'Minimum Sea Level Pressure','_FillValue':-9999,'GridType':'Lat/Lon Grid'}), - centerLat=(['numstorms','numsteps'],Clat_save,{'units':'Degrees','long_name':'TC Center Latitude Position','_FillValue':-9999,'GridType':'1.0 deg Latitude Spacing'}), - centerLon=(['numstorms','numsteps'],Clon_save,{'units':'Degrees','long_name':'TC Center Longitude Position','_FillValue':-9999,'GridType':'1.25 deg Longitude Spacing'}), - year=(['numstorms','numsteps'],year_save,{'units':'Year of given storm','long_name':'year'}), - month=(['numstorms','numsteps'],month_save,{'units':'Month of given storm','long_name':'month'}), - day=(['numstorms','numsteps'],day_save,{'units':'Day of given storm','long_name':'day'}), - hour=(['numstorms','numsteps'],hour_save,{'units':'Hour of given storm','long_name':'hour'}) + data_vars=dict( + hanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_save, + {'units': 'J/m^2', 'long_name': 'Column-Integrated MSE Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hvar=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hvar_save, + {'units': 'J^2*m^-4', 'long_name': 'Variance of Anomaly of Column-Integrated MSE', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hMoistvar=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistvar_save, + {'units': 'J^2*m^-4', 'long_name': 'Variance of Anomaly of Moist Contribution of h', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hTempvar=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempvar_save, + {'units': 'J^2*m^-4', 'long_name': 'Variance of Anomaly of Temp Contribution of h', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_LWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_LWanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of LW Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_OLRanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_OLRanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of OLR Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_SWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_SWanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of SW Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_RADanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_RADanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of RAD Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_SEFanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_SEFanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of SEF Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_hflsanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_hflsanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of HFLS Anomaly and h Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hanom_hfssanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hanom_hfssanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of HFSS Anomaly and h Anomaly', + '_FillValue': -9999, 
'GridType': 'Lat/Lon Grid'}), + hMoistanom_LWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_LWanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of LW Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_LWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_LWanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of LW Anomaly and hTempContrib Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hMoistanom_SWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_SWanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of SW Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_SWanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_SWanom_save, + {'units': 'J^2*m^-4*s^-1', 'long_name': 'Product of SW Anomaly and hTempContrib Anomaly', + '_FillValue': -9999, 'GridType': 'Lat/Lon Grid'}), + hMoistanom_OLRanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_OLRanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of OLR Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_OLRanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_OLRanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of OLR Anomaly and hTempContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hMoistanom_RADanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_RADanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of RAD Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_RADanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_RADanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of RAD Anomaly and hTempContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hMoistanom_SEFanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_SEFanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of SEF Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_SEFanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_SEFanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of SEF Anomaly and hTempContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hMoistanom_hflsanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_hflsanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of HFLS Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_hflsanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_hflsanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of HFLS Anomaly and hTempContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hMoistanom_hfssanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hMoistanom_hfssanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of HFSS Anomaly and hMoistContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + hTempanom_hfssanom=(['numstorms', 'numsteps', 'latlen', 'lonlen'], hTempanom_hfssanom_save, + {'units': 'J^2*m^-4*s^-1', + 'long_name': 'Product of HFSS Anomaly and hTempContrib Anomaly', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + latitude=(['numstorms', 'numsteps', 'latlen'], latbox_save, + {'units': 'Degrees', 'long_name': 'Latitude', '_FillValue': -9999, + 'GridType': 
'1.0 deg Latitude Spacing'}), + longitude=(['numstorms', 'numsteps', 'lonlen'], lonbox_save, + {'units': 'Degrees', 'long_name': 'Longitude', '_FillValue': -9999, + 'GridType': '1.25 deg Longitude Spacing'}), + maxwind=(['numstorms', 'numsteps'], maxwind_save, + {'units': 'm/s', 'long_name': 'Maximum Wind Speed', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + minSLP=(['numstorms', 'numsteps'], minSLP_save, + {'units': 'hPa', 'long_name': 'Minimum Sea Level Pressure', '_FillValue': -9999, + 'GridType': 'Lat/Lon Grid'}), + centerLat=(['numstorms', 'numsteps'], Clat_save, + {'units': 'Degrees', 'long_name': 'TC Center Latitude Position', '_FillValue': -9999, + 'GridType': '1.0 deg Latitude Spacing'}), + centerLon=(['numstorms', 'numsteps'], Clon_save, + {'units': 'Degrees', 'long_name': 'TC Center Longitude Position', '_FillValue': -9999, + 'GridType': '1.25 deg Longitude Spacing'}), + year=(['numstorms', 'numsteps'], year_save, {'units': 'Year of given storm', 'long_name': 'year'}), + month=(['numstorms', 'numsteps'], month_save, {'units': 'Month of given storm', 'long_name': 'month'}), + day=(['numstorms', 'numsteps'], day_save, {'units': 'Day of given storm', 'long_name': 'day'}), + hour=(['numstorms', 'numsteps'], hour_save, {'units': 'Hour of given storm', 'long_name': 'hour'}) ) ) - budgvars_ds.to_netcdf(os.environ['WK_DIR']+ '/model/Model_Budget_Variables_'+str(year)+'.nc') + budgvars_ds.to_netcdf(os.environ['WORK_DIR'] + '/model/Model_Budget_Variables_' + str(year) + '.nc') budgvars_ds.close() diff --git a/diagnostics/TC_MSE/settings.jsonc b/diagnostics/TC_MSE/settings.jsonc index adabb02e2..17e072512 100644 --- a/diagnostics/TC_MSE/settings.jsonc +++ b/diagnostics/TC_MSE/settings.jsonc @@ -12,10 +12,8 @@ // Human-readable name of the diagnostic. May contain spaces. "long_name" : "TC MSE Variance Budget diagnostic", - - // Modeling realm. If your diagnostic uses data from multiple realms, give - // this as a list. - "realm" : "atmos", + + "convention" : "cmip", // Human-readable name of the diagnostic. May contain spaces. This // is used to describe your diagnostic on the top-level index.html page. @@ -45,8 +43,16 @@ // "dimensions" attribute for each variable must correspond to a coordinate // named here. 
"dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis":"X" + }, "time": {"standard_name": "time"}, "plev": { "standard_name": "air_pressure", @@ -62,84 +68,84 @@ "varlist" : { "ta": { "standard_name": "air_temperature", - "path_variable" : "ta_var", + "realm": "atmos", "units": "K", "frequency": "6hr", "dimensions": ["time","plev","lat","lon"] }, "zg": { "standard_name": "geopotential_height", - "path_variable" : "zg_var", + "realm": "atmos", "units": "m", "frequency": "6hr", "dimensions": ["time","plev","lat","lon"] }, "hus": { "standard_name": "specific_humidity", - "path_variable" : "hus_var", + "realm": "atmos", "units": "1", "frequency": "6hr", "dimensions": ["time","plev","lat","lon"] }, "hfss": { "standard_name": "surface_upward_sensible_heat_flux", - "path_variable" : "hfss_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "hfls": { "standard_name": "surface_upward_latent_heat_flux", - "path_variable" : "hfls_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rlds": { "standard_name": "surface_downwelling_longwave_flux_in_air", - "path_variable" : "rlds_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rlus": { "standard_name": "surface_upwelling_longwave_flux_in_air", - "path_variable" : "rlus_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rlut": { "standard_name": "toa_outgoing_longwave_flux", - "path_variable" : "rlut_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rsds": { "standard_name": "surface_downwelling_shortwave_flux_in_air", - "path_variable" : "rsds_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rsdt": { "standard_name": "toa_incoming_shortwave_flux", - "path_variable" : "rsdt_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rsus": { "standard_name": "surface_upwelling_shortwave_flux_in_air", - "path_variable" : "rsus_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] }, "rsut": { "standard_name": "toa_outgoing_shortwave_flux", - "path_variable" : "rsut_var", + "realm": "atmos", "units": "W m-2", "frequency": "6hr", "dimensions": ["time","lat","lon"] diff --git a/diagnostics/TC_Rain/TC_Rain.html b/diagnostics/TC_Rain/TC_Rain.html index adaec40e5..184c2430f 100644 --- a/diagnostics/TC_Rain/TC_Rain.html +++ b/diagnostics/TC_Rain/TC_Rain.html @@ -7,7 +7,7 @@

-
diff --git a/diagnostics/TC_Rain/TC_rain.py b/diagnostics/TC_Rain/TC_rain.py
index 65f1cdf36..1916cd2a6 100644
--- a/diagnostics/TC_Rain/TC_rain.py
+++ b/diagnostics/TC_Rain/TC_rain.py
@@ -78,8 +78,7 @@
 from scipy.interpolate import interp2d  # used to interpolate the output model data
 import numpy as np
-
-### 1) Loading model data files: ###############################################
+# ## 1) Loading model data files: ###############################################
 # basin = os.getenv("basin")  # specifying the basin of TC Track input data
 thresh = [np.float(os.getenv("minthresh")), np.float(os.getenv("maxthresh"))]
@@ -163,7 +162,7 @@
 }
 x = x + 1
-### 2) Doing azimuthal average computations: #####################################################
+# 2) Doing azimuthal average computations: #####################################################
 # dist function calculates the distancing between two points on a sphere
@@ -295,7 +294,7 @@ def dist(p1, p2, radius):
 azaverage_plot.append(azavs)
-### 3) plotting and saving output: #####################################################
+# 3) plotting and saving output: #####################################################
 r = [
     12.5,
@@ -337,7 +336,7 @@ def dist(p1, p2, radius):
 fname = "azimuthalaverage.eps"
-output_fname = os.path.join(os.environ.get("WK_DIR"), "model", "PS", fname)
+output_fname = os.path.join(os.environ.get("WORK_DIR"), "model", "PS", fname)
 plt.savefig(output_fname, format="eps")
diff --git a/diagnostics/TC_Rain/settings.jsonc b/diagnostics/TC_Rain/settings.jsonc
index e69e6d15a..3c15df228 100644
--- a/diagnostics/TC_Rain/settings.jsonc
+++ b/diagnostics/TC_Rain/settings.jsonc
@@ -9,7 +9,7 @@
   "settings" : {
     "driver" : "TC_rain.py",
     "long_name" : "Azimuthal Average of TC Rain Rate",
-    "realm" : "atmos",
+    "convention" : "cmip",
     "description" : "TC rain rate azimuthal average",
     "runtime_requirements": {
       "python3": [
@@ -27,8 +27,16 @@
   },
   "dimensions": {
-    "latitude": {"standard_name": "latitude"},
-    "longitude": {"standard_name": "longitude"},
+    "latitude": {
+      "standard_name": "latitude",
+      "units": "degrees_north",
+      "axis": "Y"
+    },
+    "longitude": {
+      "standard_name": "longitude",
+      "units": "degrees_east",
+      "axis": "X"
+    },
     "time": {"standard_name": "time"}
   },
   "data": {
@@ -37,6 +45,7 @@
   "varlist" : {
     "tp": {
       "standard_name" : "total_precipitation",
+      "realm": "atmos",
       "units": "kg m-2",
       "dimensions": ["time", "latitude", "longitude"]
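Both files above follow the two renames that run through this patch: the POD-level "realm" setting becomes "convention", with "realm" moving onto each varlist entry, and the framework environment variables change from WK_DIR / FIRSTYR / LASTYR to WORK_DIR / startdate / enddate. A minimal sketch of the driver-side pattern (the figure name is hypothetical; only the environment variable names come from this patch, and note that albedofb below reads the uppercase STARTDATE/ENDDATE variants instead):

    import os

    # Environment variables exported by the framework, using the new names
    # introduced in this patch (formerly WK_DIR, FIRSTYR, LASTYR).
    work_dir = os.environ["WORK_DIR"]
    startdate = os.environ["startdate"]
    enddate = os.environ["enddate"]

    # Typical output-path construction, mirroring the drivers in this patch.
    fname = "example_figure.eps"  # hypothetical output name
    output_fname = os.path.join(work_dir, "model", "PS", fname)
    print("writing " + output_fname + " for " + startdate + "-" + enddate)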

diff --git a/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.html b/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.html
index e4b0b933a..ff90cf730 100644
--- a/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.html
+++ b/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.html
@@ -7,11 +7,12 @@
 Wheeler-Kiladis Wavenumber Frequency Power Spectra
 
-Full Documentation and Contact Information
+
+ Full Documentation and Contact Information
 
-<font color=navy>Total Precipitation, {{FIRSTYR}}-{{LASTYR}}
+<font color=navy>Total Precipitation, {{startdate}}-{{enddate}}
 {{ECMWF}} MODEL
-
@@ -38,7 +39,7 @@
 
 Wheeler-Kiladis Wavenumber Frequency Power Spectra
 
-<font color=navy>OLR (outgoing LW radiation)
+<font color=navy>OLR (outgoing LW radiation)
 {{CASENAME}} NCEP
-
@@ -65,7 +66,7 @@
 
 Wheeler-Kiladis Wavenumber Frequency Power Spectra
 
-<font color=navy>U200 (200 hPa zonal wind)
+<font color=navy>U200 (200 hPa zonal wind)
 {{CASENAME}} NCEP
-
@@ -92,7 +93,7 @@
 
 Wheeler-Kiladis Wavenumber Frequency Power Spectra
 
-<font color=navy>U850 (850 hPa zonal wind)
+<font color=navy>U850 (850 hPa zonal wind)
 {{CASENAME}} NCEP
-
@@ -119,7 +120,7 @@
 
 Wheeler-Kiladis Wavenumber Frequency Power Spectra
 
-<font color=navy>OMEGA500 (500 hPa vertical velocity)
+<font color=navy>OMEGA500 (500 hPa vertical velocity)
 {{CASENAME}} NCEP
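The page above only advertises the spectra panels; the driver that follows delegates the actual computation to NCL's wkSpaceTime. For orientation, the core operation is a two-dimensional Fourier power spectrum in (time, longitude). A minimal NumPy sketch on synthetic data, with none of the POD's detrending, tapering, segment averaging, or symmetric/antisymmetric splitting:

    import numpy as np

    # Synthetic (time, lon) field standing in for an equatorial-band variable
    # such as OLR; 6-hourly sampling is assumed (dt = 0.25 day).
    ntime, nlon = 512, 144
    rng = np.random.default_rng(0)
    field = rng.standard_normal((ntime, nlon))

    # One transform along time (-> frequency) and one along longitude
    # (-> zonal wavenumber); power is the squared modulus.
    coeffs = np.fft.fft2(field) / (ntime * nlon)
    power = np.abs(coeffs) ** 2

    # Frequency in cycles per day and integer zonal wavenumber per domain.
    freq = np.fft.fftfreq(ntime, d=0.25)
    wavenumber = np.fft.fftfreq(nlon, d=1.0 / nlon)
    print(power.shape, freq[1], wavenumber[1])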
- diff --git a/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.py b/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.py index d26d17050..6b3dda4f4 100644 --- a/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.py +++ b/diagnostics/Wheeler_Kiladis/Wheeler_Kiladis.py @@ -1,6 +1,6 @@ # This file is part of the Wheeler_Kiladis module of the MDTF code package (see LICENSE.txt) -#============================================================ +# ============================================================ # Wheeler-Kiladis Plots # Sample code to call NCL from python # @@ -12,15 +12,18 @@ # Convectively Coupled Equatorial Waves: Analysis of Clouds # and Temperature in the Wavenumber-Frequency Domain. # J. Atmos. Sci., 56, 374-399. -#============================================================ +# ============================================================ import os import subprocess import time -#============================================================ + +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -32,34 +35,33 @@ def generate_ncl_plots(nclPlotFile): try: pipe = subprocess.Popen(['ncl {0}'.format(nclPlotFile)], shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].decode() - print('NCL routine {0} \n {1}'.format(nclPlotFile,output)) + print('NCL routine {0} \n {1}'.format(nclPlotFile, output)) while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 + print("COMPUTING THE SPACE-TIME SPECTRA") -#============================================================ +# ============================================================ # Check data exists and Call NCL code -#============================================================ +# ============================================================ os.chdir(os.environ["DATADIR"]) # inputdata -#OLR - -varlist = [("u200_var","U200_FILE"), ("u850_var","U850_FILE"), ("omega500_var", "OMEGA500_FILE"), - ("rlut_var", "RLUT_FILE"), ("pr_var", "PR_FILE")] +# OLR +varlist = [("u200_var", "U200_FILE"), ("u850_var", "U850_FILE"), ("omega500_var", "OMEGA500_FILE"), + ("rlut_var", "RLUT_FILE"), ("pr_var", "PR_FILE")] for var, file_ in varlist: - print("starting var "+var) - if os.path.isfile(os.environ[file_]): - os.environ["file_WK"] = os.environ[file_] - os.environ["MVAR"] = os.environ[var] - #print("file of "+os.environ[var]+" for Wheeler-Kiladis plots found, computing wave spectra") - generate_ncl_plots(os.environ["POD_HOME"]+"/wkSpaceTime_driver.ncl") - else: - print("WARNING: file not found ("+os.environ[var]+") skipping wave spectra computation") - + print("starting var " + var) + if os.path.isfile(os.environ[file_]): + os.environ["file_WK"] = os.environ[file_] + os.environ["MVAR"] = os.environ[var] + # print("file of "+os.environ[var]+" for Wheeler-Kiladis plots found, computing wave spectra") + generate_ncl_plots(os.environ["POD_HOME"] + "/wkSpaceTime_driver.ncl") + else: + print("WARNING: file not found (" + os.environ[var] + ") skipping wave spectra computation") diff --git a/diagnostics/Wheeler_Kiladis/debug_plots.ncl b/diagnostics/Wheeler_Kiladis/debug_plots.ncl index a482e3444..8b0f1ec2c 100644 --- a/diagnostics/Wheeler_Kiladis/debug_plots.ncl +++ 
b/diagnostics/Wheeler_Kiladis/debug_plots.ncl @@ -42,7 +42,7 @@ begin debug_print("one_plot, map_flag = "+map_flag+", writing figure: "+file_name,funcname,debug_flag) - dir_out = getenv("WK_DIR")+"/model/PS/debug/" ; output dir: should be working directory + dir_out = getenv("WORK_DIR")+"/model/PS/debug/" ; output dir: should be working directory system("mkdir -p "+dir_out) file_out = dir_out+"/"+file_name wks = gsn_open_wks("png",file_out) ; send graphics to PNG file @@ -103,7 +103,7 @@ begin debug_print(" min="+min(diff)+" max="+max(diff),funcname,debug_flag) ;---Plot - dir_out = getenv("WK_DIR")+"/model/PS/debug/" ; output dir + dir_out = getenv("WORK_DIR")+"/model/PS/debug/" ; output dir system("mkdir -p "+dir_out) file_out = dir_out+"/diff_"+file_name wks = gsn_open_wks("png",file_out) ; send graphics to PNG file diff --git a/diagnostics/Wheeler_Kiladis/settings.jsonc b/diagnostics/Wheeler_Kiladis/settings.jsonc index 5f52399ec..5db26fe00 100644 --- a/diagnostics/Wheeler_Kiladis/settings.jsonc +++ b/diagnostics/Wheeler_Kiladis/settings.jsonc @@ -13,7 +13,7 @@ "settings" : { "driver" : "Wheeler_Kiladis.py", "long_name" : "Wheeler Kiladis plots", - "realm" : "atmos", + "convention": "cesm", "description" : "Wavenumber-Frequency Power Spectra (Wheeler and Kiladis)", "runtime_requirements": { "python3": [], @@ -24,8 +24,16 @@ "frequency": "day" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -37,28 +45,33 @@ "varlist": { "rlut": { "standard_name": "toa_outgoing_longwave_flux", + "realm": "atmos", "units": "W m-2", "dimensions": ["time", "lat", "lon"] }, "pr": { "standard_name": "precipitation_rate", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"] }, "omega500": { "standard_name": "lagrangian_tendency_of_air_pressure", + "realm": "atmos", "units": "Pa s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 500} }, "u200": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 200} }, "u850": { "standard_name": "eastward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev": 850} diff --git a/diagnostics/Wheeler_Kiladis/wkSpaceTime_driver.ncl b/diagnostics/Wheeler_Kiladis/wkSpaceTime_driver.ncl index 9ad39ffdf..1a168b974 100644 --- a/diagnostics/Wheeler_Kiladis/wkSpaceTime_driver.ncl +++ b/diagnostics/Wheeler_Kiladis/wkSpaceTime_driver.ncl @@ -107,7 +107,7 @@ begin latBound = 15 ; latBound = stringtointeger(getenv("LATBND")) - diro = getenv("WK_DIR")+"/model/PS/" ; output dir: location of plots + diro = getenv("WORK_DIR")+"/model/PS/" ; output dir: location of plots print ("writing to "+diro) ;------------------------------------------------------------------- ; spectral input params for wkSpaceTime @@ -182,8 +182,8 @@ begin latN = latBound latS =-latBound ; make symmetric about the equator - yr1 = stringtointeger(getenv("FIRSTYR")) - yr2 = stringtointeger(getenv("LASTYR")) + yr1 = stringtointeger(getenv("startdate")) + yr2 = stringtointeger(getenv("enddate")) f=addfile(fili,"r") ; DRB: should move all the time/calendar stuff to a function diff --git a/diagnostics/__init__.py b/diagnostics/__init__.py index 
e69de29bb..b795e98a4 100644 --- a/diagnostics/__init__.py +++ b/diagnostics/__init__.py @@ -0,0 +1,28 @@ +from .albedofb import albedofb +from .blocking_neale import blocking_neale +from .convective_transition_diag import convective_transition_diag_v2 +from .ENSO_MSE import ENSO_MSE +from .ENSO_RWS import ENSO_RWS +from .EOF_500hPa import EOF_500hPa +from .eulerian_storm_track import eulerian_storm_track +from .example import example_diag +from .example_multicase import example_multicase +from .mixed_layer_depth import mixed_layer_depth +from .MJO_prop_amp import MJO_prop_amp +from .MJO_suite import MJO_suite +from .MJO_teleconnection import mjo_teleconnection +from .ocn_surf_flux_diag import ocn_surf_flux_diag +from .precip_buoy_diag import precip_buoy_diag +from .precip_diurnal_cycle import precip_diurnal_cycle +from .seaice_suite import seaice_suite_sic_mean_sigma +from .SM_ET_coupling import SM_ET_coupling +from .stc_annular_modes import stc_annular_modes +from .stc_eddy_heat_fluxes import stc_eddy_heat_fluxes +from .stc_ozone import stc_ozone +from .stc_vert_wave_coupling import stc_vert_wave_coupling +from .TC_MSE import TC_MSE_Driver +from .TC_Rain import TC_rain +from .temp_extremes_distshape import temp_extremes_distshape +from .top_heaviness_metric import top_heaviness_metric +from .tropical_pacific_sea_level import tropical_pacific_sea_level +from .Wheeler_Kiladis import Wheeler_Kiladis \ No newline at end of file diff --git a/diagnostics/albedofb/albedofb.py b/diagnostics/albedofb/albedofb.py index f093fc3e7..e1455a45c 100644 --- a/diagnostics/albedofb/albedofb.py +++ b/diagnostics/albedofb/albedofb.py @@ -62,10 +62,10 @@ podname = 'albedofb' # these yrs only refer to the hist period for comparing kernel of mod to CERES -firstyr = "{FIRSTYR}".format(**os.environ) -lastyr = "{LASTYR}".format(**os.environ) +firstyr = "{STARTDATE}".format(**os.environ) +lastyr = "{ENDDATE}".format(**os.environ) -wk_dir = "{WK_DIR}".format(**os.environ) +wk_dir = "{WORK_DIR}".format(**os.environ) obs_dir = "{OBS_DATA}/".format(**os.environ) output_dir = wk_dir+'/model/' figures_dir = wk_dir+'/model/' diff --git a/diagnostics/albedofb/settings.jsonc b/diagnostics/albedofb/settings.jsonc index 5c55b8cf6..b8268f090 100644 --- a/diagnostics/albedofb/settings.jsonc +++ b/diagnostics/albedofb/settings.jsonc @@ -5,7 +5,7 @@ "settings": { "driver": "albedofb.py", "long_name": "Surface Albedo Feedback Diagnostics", - "realm": "atmos", + "convention": "cmip", "description": "Surface Albedo Feedback Diagnostics", "runtime_requirements": { "python3": [ @@ -27,19 +27,24 @@ }, "dimensions": { "lat": { - "standard_name": "latitude" - }, - "lon": { - "standard_name": "longitude" - }, + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": { - "standard_name": "time" - } + "standard_name": "time" + } }, "varlist": { "areacella": { "standard_name": "cell_area", "dimensions": ["lat", "lon"], + "realm": "atmos", "units": "m2", "modifier" : "atmos_realm", "requirement": "optional" @@ -48,12 +53,14 @@ "standard_name": "air_temperature", "frequency": "mon", "dimensions": ["time", "lat", "lon"], + "realm": "atmos", "modifier": "atmos_height", "units": "K", "requirement": "required" }, "rsdt": { "standard_name": "toa_incoming_shortwave_flux", + "realm": "atmos", "frequency": "mon", "units": "W m-2", "dimensions": ["time", "lat", "lon"], @@ -61,6 +68,7 @@ }, "rsds": { "standard_name": 
"surface_downwelling_shortwave_flux_in_air", + "realm": "atmos", "frequency": "mon", "units": "W m-2", "dimensions": ["time", "lat", "lon"], @@ -68,6 +76,7 @@ }, "rsut": { "standard_name": "toa_outgoing_shortwave_flux", + "realm": "atmos", "frequency": "mon", "units": "W m-2", "dimensions": ["time", "lat", "lon"], @@ -75,6 +84,7 @@ }, "rsus": { "standard_name": "surface_upwelling_shortwave_flux_in_air", + "realm": "atmos", "frequency": "mon", "units": "W m-2", "dimensions": ["time", "lat", "lon"], diff --git a/diagnostics/blocking_neale/blocking_digested.ncl b/diagnostics/blocking_neale/blocking_digested.ncl index d3c96eeee..53932e374 100644 --- a/diagnostics/blocking_neale/blocking_digested.ncl +++ b/diagnostics/blocking_neale/blocking_digested.ncl @@ -12,7 +12,7 @@ begin file_opts_out = file_opts_in ; copy everything - path_out = set_from_env("WK_DIR",False) ; wkdir + path_out = set_from_env("WORK_DIR",False) ; wkdir dir_out = set_from_env("MDTF_BLOCKING_WRITE_DIGESTED_DIR",False) ; write directory strs = str_split(file_opts_in@file_name, "/") ; separate strings of path by "/" diff --git a/diagnostics/blocking_neale/blocking_neale.py b/diagnostics/blocking_neale/blocking_neale.py index adb5a82d1..b5addf958 100644 --- a/diagnostics/blocking_neale/blocking_neale.py +++ b/diagnostics/blocking_neale/blocking_neale.py @@ -1,25 +1,25 @@ # This file is part of the blocking_neale module of the MDTF code package (see LICENSE.txt) - -#============================================================ +# ============================================================ # Rich Neale's Blocking Code # Sample code to call NCL from python -#============================================================ +# ============================================================ import os +import sys import subprocess import time import yaml - -#============================================================ +# ============================================================ # generate_html_file_header # # Note: the structure of the web page changes for multi-case # in that it is organized by pod/case instead of case/pod. # The reason this matters is that the MDTF graphic (mdtf_diag_banner) -# is either in the POD WK_DIR (multi) or one dir up (single) +# is either in the POD WORK_DIR (multi) or one dir up (single) # so we have to reference it different. -#============================================================ +# ============================================================ + def generate_html_file_header(f): """generate_html_file: write the html file header @@ -28,7 +28,7 @@ def generate_html_file_header(f): Arguments: f (file handle) """ -#First the part that everyone gets +# First the part that everyone gets html_template = """ @@ -39,8 +39,8 @@ def generate_html_file_header(f): """ f.write(html_template) -#Now different paths for the different types - if (os.environ["CASE_N"] == "1"): +# Now different paths for the different types + if os.environ["CASE_N"] == "1": html_template = """ """ @@ -53,7 +53,7 @@ def generate_html_file_header(f): f.write(html_template) -#Finish for all +# Finish for all html_template = """

Blocking Diagnostic (Rich Neale)

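The hunks around this point implement the page writer's single-case/multi-case split: CASE_N == "1" takes a fixed template, anything else loops over the case dict. A minimal sketch of that dispatch, assuming only the CASE_N environment variable and a case dict keyed as in the docstring below; the function name write_case_rows and the plain-text row format are illustrative, not the POD's actual markup:

import os
from string import Template

def write_case_rows(f, case_dict=None):
    if os.environ.get("CASE_N", "1") == "1":
        # Single case: leave {{CASENAME}}-style tokens for the framework's later pass.
        f.write("{{CASENAME}}\n")
    else:
        # Multiple cases: substitute one row per case now.
        row = Template("$CASENAME ($startdate-$enddate)\n")
        for _name, settings in (case_dict or {}).items():
            f.write(row.substitute(settings))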
@@ -91,31 +91,34 @@ def generate_html_file_header(f):
     """
     f.write(html_template)
-
-#============================================================
+# ============================================================
 # generate_html_file_case_loop (for multirun)
-#============================================================
-def generate_html_file_case_loop(f,case_dict):
+# ============================================================
+
+
+def generate_html_file_case_loop(f, case_dict: dict):
     """generate_html_file: write the case information into the html template

     Arguments: f (file handle)
-               case_dict (nested dict [case1, [casename,firstyr,lastyr],
+               case_dict (nested dict [case1, [CASENAME, startdate, enddate],
                                        case2, [ ... ],
                                        ...,  caseN, [ ... ]]

     Note: safe_substitute could be used; it leaves unmodified
           anything that doesn't have a match (instead of crashing)
     Note: any other case info in the dict can be replaced, eg:
-          case_template = Template("<...>PRECT (precipitation rate)<...>"
-                                   "<...>$CASENAME ($FIRSTYR-$LASTYR)")
+          case_template = Template("<...>PRECT (precipitation rate)<...>{{CASENAME}}<...>TRMM<...>"
+                                   "<...>$CASENAME ($startdate-$enddate)")
     """
     from string import Template
-    case_template = Template("<...>$CASENAME")
-    for case_name, case_settings in case_dict:
+    case_template = Template("<...>"
+                             "<...>$CASENAME")
+    for case_name, case_settings in case_dict.items():
         html_template = case_template.substitute(case_settings)
         f.write(html_template)
-    #finalize the figure table, start the case settings table
+    # finalize the figure table, start the case settings table
    html_template = """

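The note on safe_substitute in the docstring above is easy to demonstrate, and the difference matters because case_settings may not carry every placeholder. Standard library only, with a made-up case name:

from string import Template

t = Template("$CASENAME ($startdate-$enddate)")
print(t.safe_substitute(CASENAME="CESM_demo"))  # 'CESM_demo ($startdate-$enddate)': missing keys left intact
# t.substitute(CASENAME="CESM_demo")            # would raise KeyError: 'startdate'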
@@ -125,14 +128,14 @@ def generate_html_file_case_loop(f,case_dict):
     """
     f.write(html_template)
-    #write the settings per case. First header.
-    # This prints the whole thing html_template = str(case_dict)
+    # write the settings per case. First header.
+    # This prints the whole thing html_template = str(case_dict)
-    case_template = Template("<...>$CASENAME")
-    settings_template = Template("$FIRSTYR - $LASTYR ")
+    case_template = Template("<...>$CASENAME")
+    settings_template = Template("$startdate - $enddate ")
-    for case_name, case_settings in case_dict:
-        html_template = case_template.safe_substitute(case_settings)
+    for case_name, case_settings in case_dict.items():
+        html_template = case_template.safe_substitute(case_settings)
         f.write(html_template)
         html_template = settings_template.safe_substitute(case_settings)
@@ -145,25 +148,24 @@ def generate_html_file_case_loop(f,case_dict):
POD Settings """ f.write(html_template) - - -#============================================================ +# ============================================================ # generate_html_file_case_single (NOT multirun) -#============================================================ -def generate_html_file_case_single(f): +# ============================================================ + + +def generate_html_file_case_single(f): """generate_html_file: write a template file that the framework will replace Arguments: f (file handle) - """ - # Write the Annual Cycle figure. Leave replacements to the framework (for now) # see case_multi for how to substitute eventually - html_template = "{{CASENAME}}" + html_template = \ + "{{CASENAME}}" f.write(html_template) - #finalize the figure table, start the case settings table + # finalize the figure table, start the case settings table html_template = """

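As the comments in this function note, the single-case path deliberately leaves {{CASENAME}}-style tokens in the page and lets the framework replace them later (the footer below says this happens in a call from environment_manager.py). The framework's actual substitution code is not part of this diff; a hypothetical sketch of such a second pass, using environment variables as the value source:

import os
import re

def fill_framework_tokens(html: str) -> str:
    # Hypothetical helper: swap each {{NAME}} token for the matching
    # environment variable, leaving unknown tokens untouched.
    return re.sub(r"\{\{(\w+)\}\}",
                  lambda m: os.environ.get(m.group(1), m.group(0)),
                  html)

# fill_framework_tokens("{{CASENAME}}: {{startdate}} - {{enddate}}")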
@@ -174,12 +176,12 @@ def generate_html_file_case_single(f): f.write(html_template) # Write the settings per case. First header. - # note: to print the whole thing: html_template = str(case_dict) + # NOTE: to print the whole thing: html_template = str(case_dict) html_template = "{{CASENAME}}" f.write(html_template) - html_template = "{{FIRSTYR}} - {{LASTYR}} " + html_template = "{{startdate}} - {{enddate}} " f.write(html_template) # Finish the table @@ -188,11 +190,12 @@ def generate_html_file_case_single(f):

""" f.write(html_template) - -#============================================================ +# ============================================================ # generate_html_file_footer -#============================================================ +# ============================================================ + + def generate_html_file_footer(f): """generate_html_file_footer: write the footer to the the html template @@ -200,9 +203,9 @@ def generate_html_file_footer(f): Arguments: f (file handle) """ - #Finish off the website with all the settings from the run - #The following are replaced by the framework in a call from environment_manager.py - #It would be great to just dump the dict but it isn't accessible here + # Finish off the website with all the settings from the run + # The following are replaced by the framework in a call from environment_manager.py + # It would be great to just dump the dict but it isn't accessible here # maybe python codes are called with the pod object html_template = """ @@ -210,14 +213,14 @@ def generate_html_file_footer(f): POD Settings SEASON ANN - MDTF_BLOCKING_OBS "{{MDTF_BLOCKING_OBS}}" + MDTF_BLOCKING_OBS "{{MDTF_BLOCKING_OBS}}" MDTF_BLOCKING_CAM3 "{{MDTF_BLOCKING_CAM3}}" MDTF_BLOCKING_CAM4 "{{MDTF_BLOCKING_CAM4}}" MDTF_BLOCKING_CAM5 "{{MDTF_BLOCKING_CAM5}}" MDTF_BLOCKING_OBS_USE_CASE_YEARS "{{MDTF_BLOCKING_OBS_USE_CASE_YEARS}}" - MDTF_BLOCKING_OBS_CAM5 FIRSTYR - LASTYR "{{MDTF_BLOCKING_OBS_CAM5_FIRSTYR}} - {{MDTF_BLOCKING_OBS_CAM5_LASTYR}}" - MDTF_BLOCKING_OBS_ERA FIRSTYR - LASTYR "{{MDTF_BLOCKING_OBS_ERA_FIRSTYR }} - {{MDTF_BLOCKING_OBS_ERA_LASTYR}}" - MDTF_BLOCKING_OBS_MERRA FIRSTYR - LASTYR "{{MDTF_BLOCKING_OBS_MERRA_FIRSTYR}} - {{MDTF_BLOCKING_OBS_MERRA_LASTYR}}" + MDTF_BLOCKING_OBS_CAM5 STARTDATE - ENDDATE "{{MDTF_BLOCKING_OBS_CAM5_STARTDATE}} - {{MDTF_BLOCKING_OBS_CAM5_ENDDATE}}" + MDTF_BLOCKING_OBS_ERA STARTDATE - ENDDATE "{{MDTF_BLOCKING_OBS_ERA_STARTDATE }} - {{MDTF_BLOCKING_OBS_ERA_ENDDATE}}" + MDTF_BLOCKING_OBS_MERRA STARTDATE - ENDDATE "{{MDTF_BLOCKING_OBS_MERRA_STARTDATE}} - {{MDTF_BLOCKING_OBS_MERRA_ENDDATE}}" MDTF_BLOCKING_READ_DIGESTED "{{MDTF_BLOCKING_READ_DIGESTED}}" MDTF_BLOCKING_WRITE_DIGESTED "{{MDTF_BLOCKING_WRITE_DIGESTED}}" @@ -227,7 +230,7 @@ def generate_html_file_footer(f): MODEL_DATA_PATH "{{MODEL_DATA_PATH}}" OBS_DATA "{{OBS_DATA}}" POD_HOME "{{POD_HOME}}" - WK_DIR "{{WK_DIR}}" + WORK_DIR "{{WORK_DIR}}" case_env_file "{{case_env_file}}" zg_var "{{zg_var}}" @@ -240,35 +243,36 @@ def generate_html_file_footer(f): # writing the code into the file f.write(html_template) -#============================================================ +# ============================================================ # generate_html_file -#============================================================ -def generate_html_file(html_page,case_dict=None): +# ============================================================ + + +def generate_html_file(html_page: str, case_dict=None): """generate_html_file: write the html file template with generic variable names, for the correct cases - Arguments: html_page(string) file name full path + Arguments: html_page(string): file name full path case_dict (nested dict) """ - f = open(html_page,"w") + f = open(html_page, "w") generate_html_file_header(f) - if (os.environ["CASE_N"] == "1"): + if os.environ["CASE_N"] == "1": generate_html_file_case_single(f) else: - generate_html_file_case_loop(f,case_dict) + generate_html_file_case_loop(f, case_dict) generate_html_file_footer(f) - - + # close the file f.close() +# 
============================================================ +# generate_ncl_plots - call a nclPlotFile via subprocess call +# ============================================================ -#============================================================ -# generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -280,11 +284,11 @@ def generate_ncl_plots(nclPlotFile): try: pipe = subprocess.Popen(['ncl {0}'.format(nclPlotFile)], shell=True, stdout=subprocess.PIPE) output = pipe.communicate()[0].decode() - print('NCL routine {0} \n {1}'.format(nclPlotFile,output)) + print('NCL routine {0} \n {1}'.format(nclPlotFile, output)) while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 @@ -292,19 +296,21 @@ def generate_ncl_plots(nclPlotFile): # MAIN ############################################################ -#============================================================ +# ============================================================ # Translate yaml file variables to environment variables for -# NCL programs to read -#============================================================ +# NCL programs to read +# ============================================================ + +# Check for $WORK_DIR/case_env.yaml, as sign of multiple cases + -# Check for $WKDIR/case_env.yaml, as sign of multipe cases print("blocking_neale.py looking for possible multicase case_env_file") env_var = "case_env_file" -if env_var in os.environ: +if env_var in os.environ: case_env_file = os.environ.get("case_env_file") print("blocking_neale.py case_env_file found? 
",case_env_file) - if (os.path.isfile(case_env_file)): + if os.path.isfile(case_env_file): with open(case_env_file, 'r') as stream: try: case_info = yaml.safe_load(stream) @@ -315,47 +321,45 @@ def generate_ncl_plots(nclPlotFile): icase = 0 # index for cases, needed to save numbered env vars for case_name, case_settings in case_info.items(): - icase = icase+1 - print("case ",icase,": ",case_name) + icase = icase + 1 + print("case ", icase, ": ", case_name) for k, v in case_settings.items(): - casei_env_var_name = "CASE"+str(icase)+"_"+str(k) + casei_env_var_name = "CASE" + str(icase) + "_" + str(k) os.environ[casei_env_var_name] = str(v) - print("setenv ",casei_env_var_name,"\t ",v) + print("setenv ", casei_env_var_name, "\t ", v) os.environ["CASE_N"] = str(icase) - print("setenv ","CASE_N","\t ",icase) + print("setenv ", "CASE_N", "\t ", icase) else: print("No multicase case_env_file found so proceeding as single case") os.environ["CASE_N"] = "1" - -#============================================================ +# ============================================================ # Call NCL code here -#============================================================ +# ============================================================ if not os.path.exists(os.path.join(os.environ['DATADIR'], 'day')): os.makedirs(os.path.join(os.environ['DATADIR'], 'day')) print("blocking_neale.py calling blocking.ncl") generate_ncl_plots(os.environ["POD_HOME"]+"/blocking.ncl") - - -#============================================================ +# ============================================================ # Generate HTML page with correct number of cases -#============================================================ -#This is the proper place but the framework fails if there isn't something +# ============================================================ +# This is the proper place but the framework fails if there isn't something # in the POD_HOME dir, and placing a stub file there ends up overwriting this! -#html_page = os.environ["WK_DIR"]+"/blocking_neale.html" +# html_page = os.environ["WORK_DIR"]+"/blocking_neale.html" html_page = os.environ["POD_HOME"]+"/blocking_neale.html" -print("blocking_neale.py generating dynamic webpage ",html_page) +print("blocking_neale.py generating dynamic webpage ", html_page) -if (os.environ["CASE_N"] == "1"): +if os.environ["CASE_N"] == "1": generate_html_file(html_page) else: - generate_html_file(html_page,case_info.items()) + generate_html_file(html_page, case_info) +# ============================================================ -#============================================================ print("blocking_neale.py finished.") +sys.exit(0) diff --git a/diagnostics/blocking_neale/settings.jsonc b/diagnostics/blocking_neale/settings.jsonc index 1488127f1..835cafe91 100644 --- a/diagnostics/blocking_neale/settings.jsonc +++ b/diagnostics/blocking_neale/settings.jsonc @@ -14,10 +14,8 @@ // Human-readable name of the diagnostic. May contain spaces. "long_name" : "Rich Neale's blocking diagnostic", - - // Modeling realm. If your diagnostic uses data from multiple realms, give - // this as a list. - "realm" : "atmos", + // Data convention exepected by the diagnostic: cmip (default), cesm, or gfdl + "convention": "cesm", // Human-readable name of the diagnostic. May contain spaces. This // is used to describe your diagnostic on the top-level index.html page. @@ -70,8 +68,16 @@ // "dimensions" attribute for each variable must correspond to a coordinate // named here. 
"dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -91,6 +97,7 @@ "path_variable" : "MODEL_DATA_PATH", "standard_name" : "geopotential_height", "units": "m", + "realm" : "atmos", "frequency" : "day", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"lev" : 500} diff --git a/diagnostics/convective_transition_diag/convecTransBasic.py b/diagnostics/convective_transition_diag/convecTransBasic.py index ba0ef29c6..f203a9c3a 100644 --- a/diagnostics/convective_transition_diag/convecTransBasic.py +++ b/diagnostics/convective_transition_diag/convecTransBasic.py @@ -3,29 +3,29 @@ # ====================================================================== # convecTransBasic.py # -# Convective Transition Basic Statistics -# as part of functionality provided by -# Convective Transition Diagnostic Package (convective_transition_diag_v1r3.py) +# Convective Transition Basic Statistics +# as part of functionality provided by +# Convective Transition Diagnostic Package (convective_transition_diag_v1r3.py) # -# Version 1 revision 3 13-Nov-2017 Yi-Hung Kuo (UCLA) -# PI: J. David Neelin (UCLA; neelin@atmos.ucla.edu) -# Current developer: Yi-Hung Kuo (yhkuo@atmos.ucla.edu) -# Contributors: K. A. Schiro (UCLA), B. Langenbrunner (UCLA), F. Ahmed (UCLA), -# C. Martinez (UCLA), C.-C. (Jack) Chen (NCAR) +# Version 1 revision 3 13-Nov-2017 Yi-Hung Kuo (UCLA) +# PI: J. David Neelin (UCLA; neelin@atmos.ucla.edu) +# Current developer: Yi-Hung Kuo (yhkuo@atmos.ucla.edu) +# Contributors: K. A. Schiro (UCLA), B. Langenbrunner (UCLA), F. Ahmed (UCLA), +# C. Martinez (UCLA), C.-C. (Jack) Chen (NCAR) # -# This file is part of the Convective Transition Diagnostic Package -# and the MDTF code package. See LICENSE.txt for the license. +# This file is part of the Convective Transition Diagnostic Package +# and the MDTF code package. See LICENSE.txt for the license. # -# Computes a set of Convective Transition Statistics as in Kuo et al. (2018). +# Computes a set of Convective Transition Statistics as in Kuo et al. (2018). 
# -# Generates plots of: +# Generates plots of: # (1) conditional average precipitation # (2) conditional probability of precipitation # (3) probability density function (PDF) of all events # (4) PDF of precipitating events # all as a function of column water vapor (CWV) and bulk tropospheric temperature # -# Depends on the following scripts: +# Depends on the following scripts: # (1) convecTransBasic_usp_calc.py # (2) convecTransBasic_usp_plot.py # (3) convecTransBasic_util.py @@ -33,27 +33,27 @@ # Bulk tropospheric temperature measures used include # (1) tave: mass-weighted column average temperature # (2) qsat_int: column-integrated saturation humidity -# Choose one by setting BULK_TROPOSPHERIC_TEMPERATURE_MEASURE -# in mdtf.py (or convecTransBasic_usp_calc.py) -# Here the column is 1000-200 hPa by default +# Choose one by setting BULK_TROPOSPHERIC_TEMPERATURE_MEASURE +# in mdtf.py (or convecTransBasic_usp_calc.py) +# Here the column is 1000-200 hPa by default # # tave & qsat_int are not standard model output yet, pre-processing calculates these two -# and saves them in the model output directory (if there is a permission issue, -# change PREPROCESSING_OUTPUT_DIR with related changes, or simply force -# data["SAVE_TAVE_QSAT_INT"]=0, both in convecTransBasic_usp_calc.py) +# and saves them in the model output directory (if there is a permission issue, +# change PREPROCESSING_OUTPUT_DIR with related changes, or simply force +# data["SAVE_TAVE_QSAT_INT"]=0, both in convecTransBasic_usp_calc.py) # # Defaults for binning choices, etc. that can be altered by user are in: -# convecTransBasic_usp_calc.py +# convecTransBasic_usp_calc.py # # Defaults for plotting choices that can be altered by user are in: -# convecTransBasic_usp_calc_plot.py +# convecTransBasic_usp_calc_plot.py # # Utility functions are defined in convecTransBasic_util.py # # To change regions over which binning computations are done, see -# convecTransBasic_usp_calc.py & -# generate_region_mask in convecTransBasic_util.py -# (and change obs_data/convective_transition_diag/region_0.25x0.25_costal2.5degExcluded.mat) +# convecTransBasic_usp_calc.py & +# generate_region_mask in convecTransBasic_util.py +# (and change obs_data/convective_transition_diag/region_0.25x0.25_costal2.5degExcluded.mat) # ====================================================================== # Import standard Python packages import os @@ -77,15 +77,15 @@ print("Load user-specified binning parameters..."), # Create and read user-specified parameters -os.system("python "+os.environ["POD_HOME"]+"/"+"convecTransBasic_usp_calc.py") -with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json") as outfile: - bin_data=json.load(outfile) +os.system("python "+ os.environ["POD_HOME"]+ "/" + "convecTransBasic_usp_calc.py") +with open(os.environ["WORK_DIR"]+"/" + "convecTransBasic_calc_parameters.json") as outfile: + bin_data = json.load(outfile) print("...Loaded!") print("Load user-specified plotting parameters..."), -os.system("python "+os.environ["POD_HOME"]+"/"+"convecTransBasic_usp_plot.py") -with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_plot_parameters.json") as outfile: - plot_data=json.load(outfile) +os.system("python " + os.environ["POD_HOME"] + "/" + "convecTransBasic_usp_plot.py") +with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_plot_parameters.json") as outfile: + plot_data = json.load(outfile) print("...Loaded!") # ====================================================================== @@ -96,31 +96,32 @@ # if so, skip 
binning; otherwise, bin data using model output # (see convecTransBasic_usp_calc.py for where the model output locate) -if (len(bin_data["bin_output_list"])==0 or bin_data["BIN_ANYWAY"]): +if len(bin_data["bin_output_list"]) == 0 or bin_data["BIN_ANYWAY"]: print("Starting binning procedure...") - if bin_data["PREPROCESS_TA"]==1: - print(" Atmospheric temperature pre-processing required") - if bin_data["SAVE_TAVE_QSAT_INT"]==1: - print(" Pre-processed temperature fields ("\ - +os.environ["tave_var"]+" & "+os.environ["qsat_int_var"]\ - +") will be saved to "+bin_data["PREPROCESSING_OUTPUT_DIR"]+"/") + if bin_data["PREPROCESS_TA"] == 1: + print(" Atmospheric temperature pre-processing required") + if bin_data["SAVE_TAVE_QSAT_INT"] == 1: + print(" Pre-processed temperature fields (" + + os.environ["tave_var"] + " & " + os.environ["qsat_int_var"] + + ") will be saved to " + bin_data["PREPROCESSING_OUTPUT_DIR"] + "/") # Load & pre-process region mask - REGION=generate_region_mask(bin_data["REGION_MASK_DIR"]+"/"+bin_data["REGION_MASK_FILENAME"], bin_data["pr_list"][0],bin_data["LAT_VAR"],bin_data["LON_VAR"]) + REGION=generate_region_mask(bin_data["REGION_MASK_DIR"] + "/" + bin_data["REGION_MASK_FILENAME"], + bin_data["pr_list"][0], bin_data["LAT_VAR"], bin_data["LON_VAR"]) # Pre-process temperature (if necessary) & bin & save binned results - binned_output=convecTransBasic_calc_model(REGION,bin_data["args1"]) + binned_output=convecTransBasic_calc_model(REGION, bin_data["args1"]) -else: # Binned data file exists & BIN_ANYWAY=False +else: # Binned data file exists & BIN_ANYWAY=False print("Binned output detected..."), binned_output=convecTransBasic_loadAnalyzedData(bin_data["args2"]) print("...Loaded!") # ====================================================================== # Plot binning results & save the figure in wkdir/MDTF_casename/.../ -convecTransBasic_plot(binned_output,plot_data["plot_params"],plot_data["args3"],plot_data["args4"]) +convecTransBasic_plot(binned_output, plot_data["plot_params"], plot_data["args3"], plot_data["args4"]) print("**************************************************") print("Convective Transition Basic Statistics (convecTransBasic.py) Executed!") diff --git a/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py b/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py index 1da392488..31efcc7e5 100644 --- a/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py +++ b/diagnostics/convective_transition_diag/convecTransBasic_usp_calc.py @@ -31,73 +31,73 @@ # ====================================================================== # Region mask directory & filename -REGION_MASK_DIR=os.environ["OBS_DATA"] -REGION_MASK_FILENAME="region_0.25x0.25_costal2.5degExcluded.mat" +REGION_MASK_DIR = os.environ["OBS_DATA"] +REGION_MASK_FILENAME = "region_0.25x0.25_costal2.5degExcluded.mat" # Number of regions # Use grids with 1<=region<=NUMBER_OF_REGIONS in the mask -NUMBER_OF_REGIONS=4 # default: 4 +NUMBER_OF_REGIONS = 4 # default: 4 # Region names -REGION_STR=["WPac","EPac","Atl","Ind"] +REGION_STR = ["WPac", "EPac", "Atl", "Ind"] # ====================================================================== # Directory for saving pre-processed temperature fields -# tave [K]: Mass-weighted column average temperature -# qsat_int [mm]: Column-integrated saturation specific humidity +# tave [K]: Mass-weighted column average temperature +# qsat_int [mm]: Column-integrated saturation specific humidity # USER MUST HAVE WRITE PERMISSION -# If one changes 
PREPROCESSING_OUTPUT_DIR, one must also modify data["tave_list"] -# & data["qsat_int_list"] below by replacing MODEL_OUTPUT_DIR with -# PREPROCESSING_OUTPUT_DIR -PREPROCESSING_OUTPUT_DIR=os.environ["DATADIR"] -TAVE_VAR=os.environ["tave_var"] -QSAT_INT_VAR=os.environ["qsat_int_var"] +# If one changes PREPROCESSING_OUTPUT_DIR, one must also modify data["tave_list"] +# & data["qsat_int_list"] below by replacing MODEL_OUTPUT_DIR with +# PREPROCESSING_OUTPUT_DIR +PREPROCESSING_OUTPUT_DIR = os.environ["DATADIR"] +TAVE_VAR = os.environ["tave_var"] +QSAT_INT_VAR = os.environ["qsat_int_var"] # Number of time-steps in Temperature-preprocessing -# Default: 1000 (use smaller numbers for limited memory) -time_idx_delta=1000 +# Default: 1000 (use smaller numbers for limited memory) +time_idx_delta = 1000 # Use 1:tave, or 2:qsat_int as Bulk Tropospheric Temperature Measure -BULK_TROPOSPHERIC_TEMPERATURE_MEASURE=int(os.environ["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"]) +BULK_TROPOSPHERIC_TEMPERATURE_MEASURE = int(os.environ["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"]) # ====================================================================== # Directory & Filename for saving binned results (netCDF4) # tave or qsat_int will be appended to BIN_OUTPUT_FILENAME -BIN_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/netCDF" -BIN_OUTPUT_FILENAME=os.environ["CASENAME"]+".convecTransBasic" +BIN_OUTPUT_DIR = os.environ["WORK_DIR"] + "/model/netCDF" +BIN_OUTPUT_FILENAME = os.environ["CASENAME"] + ".convecTransBasic" # ====================================================================== # Re-do binning even if binned data file detected (default: True) -BIN_ANYWAY=True +BIN_ANYWAY = True # ====================================================================== # Column Water Vapor (CWV in mm) range & bin-width # CWV bin centers are integral multiples of cwv_bin_width -CWV_BIN_WIDTH=0.3 # default=0.3 (following satellite retrieval product) -CWV_RANGE_MAX=90.0 # default=90 (75 for satellite retrieval product) +CWV_BIN_WIDTH = 0.3 # default=0.3 (following satellite retrieval product) +CWV_RANGE_MAX = 90.0 # default=90 (75 for satellite retrieval product) # Mass-weighted Column Average Temperature tave [K] range & bin-width # with 1K increment and integral bin centers -T_RANGE_MIN=260.0 -T_RANGE_MAX=280.0 -T_BIN_WIDTH=1.0 +T_RANGE_MIN = 260.0 +T_RANGE_MAX = 280.0 +T_BIN_WIDTH = 1.0 # Column-integrated Saturation Specific Humidity qsat_int [mm] range & bin-width -# with bin centers = Q_RANGE_MIN + integer*Q_BIN_WIDTH +# with bin centers = Q_RANGE_MIN + integer*Q_BIN_WIDTH # Satellite retrieval suggests T_BIN_WIDTH=1 -# is approximately equivalent to Q_BIN_WIDTH=4.8 -Q_RANGE_MIN=16.0 -Q_RANGE_MAX=106.0 -Q_BIN_WIDTH=4.5 +# is approximately equivalent to Q_BIN_WIDTH=4.8 +Q_RANGE_MIN = 16.0 +Q_RANGE_MAX = 106.0 +Q_BIN_WIDTH = 4.5 # Define column [hPa] (default: 1000-200 hPa) -# One can re-define column by changing p_lev_bottom & p_lev_top, -# but one must also delete/re-name existing tave & qsat_int files -# since the default tave & qsat_int filenames do not contain conlumn info -p_lev_bottom=1000 -p_lev_top=200 +# One can re-define column by changing p_lev_bottom & p_lev_top, +# but one must also delete/re-name existing tave & qsat_int files +# since the default tave & qsat_int filenames do not contain column info +p_lev_bottom = 1000 +p_lev_top = 200 # If model pressure levels are close to p_lev_bottom and/or p_lev_top -# (within dp-hPa neighborhood), 
use model level(s) to define column instead +dp = 1.0 # Threshold value defining precipitating events [mm/hr] -PRECIP_THRESHOLD=0.25 +PRECIP_THRESHOLD = 0.25 # ====================================================================== # END USER SPECIFIED SECTION @@ -107,130 +107,132 @@ # DO NOT MODIFY CODE BELOW UNLESS # YOU KNOW WHAT YOU ARE DOING # ====================================================================== -data={} - -data["MODEL"]=MODEL -data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR -data["PREPROCESSING_OUTPUT_DIR"]=PREPROCESSING_OUTPUT_DIR - -data["REGION_MASK_DIR"]=REGION_MASK_DIR -data["REGION_MASK_FILENAME"]=REGION_MASK_FILENAME - -data["NUMBER_OF_REGIONS"]=NUMBER_OF_REGIONS -data["REGION_STR"]=REGION_STR - -data["TIME_VAR"]=TIME_VAR -data["LAT_VAR"]=LAT_VAR -data["LON_VAR"]=LON_VAR -data["TAVE_VAR"]=TAVE_VAR -data["QSAT_INT_VAR"]=QSAT_INT_VAR -data["PRES_VAR"]=PRES_VAR -data["time_idx_delta"]=time_idx_delta -data["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"]=BULK_TROPOSPHERIC_TEMPERATURE_MEASURE - -data["BIN_OUTPUT_DIR"]=BIN_OUTPUT_DIR -data["BIN_OUTPUT_FILENAME"]=BIN_OUTPUT_FILENAME - -if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: - data["BIN_OUTPUT_FILENAME"]+="_"+TAVE_VAR - data["TEMP_VAR"]=TAVE_VAR -elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2: - data["BIN_OUTPUT_FILENAME"]+="_"+QSAT_INT_VAR - data["TEMP_VAR"]=QSAT_INT_VAR - -data["BIN_ANYWAY"]=BIN_ANYWAY +data = {} + +data["MODEL"] = MODEL +data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR +data["PREPROCESSING_OUTPUT_DIR"] = PREPROCESSING_OUTPUT_DIR + +data["REGION_MASK_DIR"] = REGION_MASK_DIR +data["REGION_MASK_FILENAME"] = REGION_MASK_FILENAME + +data["NUMBER_OF_REGIONS"] = NUMBER_OF_REGIONS +data["REGION_STR"] = REGION_STR + +data["TIME_VAR"] = TIME_VAR +data["LAT_VAR"] = LAT_VAR +data["LON_VAR"] = LON_VAR +data["TAVE_VAR"] = TAVE_VAR +data["QSAT_INT_VAR"] = QSAT_INT_VAR +data["PRES_VAR"] = PRES_VAR +data["time_idx_delta"] = time_idx_delta +data["BULK_TROPOSPHERIC_TEMPERATURE_MEASURE"] = BULK_TROPOSPHERIC_TEMPERATURE_MEASURE + +data["BIN_OUTPUT_DIR"] = BIN_OUTPUT_DIR +data["BIN_OUTPUT_FILENAME"] = BIN_OUTPUT_FILENAME + +if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + data["BIN_OUTPUT_FILENAME"] += "_" + TAVE_VAR + data["TEMP_VAR"] = TAVE_VAR +elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + data["BIN_OUTPUT_FILENAME"] += "_" + QSAT_INT_VAR + data["TEMP_VAR"] = QSAT_INT_VAR + +data["BIN_ANYWAY"] = BIN_ANYWAY -data["CWV_BIN_WIDTH"]=CWV_BIN_WIDTH -data["CWV_RANGE_MAX"]=CWV_RANGE_MAX +data["CWV_BIN_WIDTH"] = CWV_BIN_WIDTH +data["CWV_RANGE_MAX"] = CWV_RANGE_MAX -data["T_RANGE_MIN"]=T_RANGE_MIN -data["T_RANGE_MAX"]=T_RANGE_MAX -data["T_BIN_WIDTH"]=T_BIN_WIDTH +data["T_RANGE_MIN"] = T_RANGE_MIN +data["T_RANGE_MAX"] = T_RANGE_MAX +data["T_BIN_WIDTH"] = T_BIN_WIDTH -data["Q_RANGE_MIN"]=Q_RANGE_MIN -data["Q_RANGE_MAX"]=Q_RANGE_MAX -data["Q_BIN_WIDTH"]=Q_BIN_WIDTH +data["Q_RANGE_MIN"] = Q_RANGE_MIN +data["Q_RANGE_MAX"] = Q_RANGE_MAX +data["Q_BIN_WIDTH"] = Q_BIN_WIDTH -data["p_lev_bottom"]=p_lev_bottom -data["p_lev_top"]=p_lev_top -data["dp"]=dp +data["p_lev_bottom"] = p_lev_bottom +data["p_lev_top"] = p_lev_top +data["dp"] = dp -data["PRECIP_THRESHOLD"]=PRECIP_THRESHOLD +data["PRECIP_THRESHOLD"] = PRECIP_THRESHOLD # List binned data file (with filename corresponding to casename) -data["bin_output_list"]=sorted(glob.glob(data["BIN_OUTPUT_DIR"]+"/"+data["BIN_OUTPUT_FILENAME"]+".nc")) +data["bin_output_list"] = sorted(glob.glob(data["BIN_OUTPUT_DIR"] + "/" + data["BIN_OUTPUT_FILENAME"] + ".nc")) # List available netCDF files # Assumes that 
the corresponding files in each list -# have the same spatial/temporal coverage/resolution -pr_list=sorted(glob.glob(os.environ["pr_file"])) -prw_list=sorted(glob.glob(os.environ["prw_file"])) -ta_list=sorted(glob.glob(os.environ["ta_file"])) +# have the same spatial/temporal coverage/resolution +pr_list = sorted(glob.glob(os.environ["pr_file"])) +prw_list = sorted(glob.glob(os.environ["prw_file"])) +ta_list = sorted(glob.glob(os.environ["ta_file"])) data["pr_list"] = pr_list data["prw_list"] = prw_list data["ta_list"] = ta_list # Check for pre-processed tave & qsat_int data -data["tave_list"]=sorted(glob.glob(os.environ["tave_file"])) -data["qsat_int_list"]=sorted(glob.glob(os.environ["qsat_int_file"])) +data["tave_list"] = sorted(glob.glob(os.environ["tave_file"])) +data["qsat_int_list"] = sorted(glob.glob(os.environ["qsat_int_file"])) -if (len(data["tave_list"])==0 or len(data["qsat_int_list"])==0): - data["PREPROCESS_TA"]=1 +if len(data["tave_list"]) == 0 or len(data["qsat_int_list"]) == 0: + data["PREPROCESS_TA"] = 1 else: - data["PREPROCESS_TA"]=0 + data["PREPROCESS_TA"] = 0 # Save pre-processed tave & qsat_int or not; default=0 (don't save) -data["SAVE_TAVE_QSAT_INT"]=int(os.environ["SAVE_TAVE_QSAT_INT"]) -if data["PREPROCESS_TA"]!=data["SAVE_TAVE_QSAT_INT"]: +data["SAVE_TAVE_QSAT_INT"] = int(os.environ["SAVE_TAVE_QSAT_INT"]) +if data["PREPROCESS_TA"] != data["SAVE_TAVE_QSAT_INT"]: print("Pre-processing of air temperature (ta) required to compute weighted column averages,") print(" but the pre-processed results will not be saved as intermediate output.") print("To save the pre-processed results as NetCDF files for re-use (write permission required),") print(" go to settings.jsonc, and changes SAVE_TAVE_QSAT_INT to 1.") # Taking care of function arguments for binning -data["args1"]=[ \ -BULK_TROPOSPHERIC_TEMPERATURE_MEASURE, \ -CWV_BIN_WIDTH, \ -CWV_RANGE_MAX, \ -T_RANGE_MIN, \ -T_RANGE_MAX, \ -T_BIN_WIDTH, \ -Q_RANGE_MIN, \ -Q_RANGE_MAX, \ -Q_BIN_WIDTH, \ -NUMBER_OF_REGIONS, \ -pr_list, \ -PR_VAR, \ -prw_list, \ -PRW_VAR, \ -data["PREPROCESS_TA"], \ -MODEL_OUTPUT_DIR, \ -data["qsat_int_list"], \ -QSAT_INT_VAR, \ -data["tave_list"], \ -TAVE_VAR, \ -ta_list, \ -TA_VAR, \ -PRES_VAR, \ -MODEL, \ -p_lev_bottom, \ -p_lev_top, \ -dp, \ -time_idx_delta, \ -data["SAVE_TAVE_QSAT_INT"], \ -PREPROCESSING_OUTPUT_DIR, \ -PRECIP_THRESHOLD, \ -data["BIN_OUTPUT_DIR"], \ -data["BIN_OUTPUT_FILENAME"], \ -TIME_VAR, \ -LAT_VAR, \ -LON_VAR ] - -data["args2"]=[ \ -data["bin_output_list"],\ -TAVE_VAR,\ -QSAT_INT_VAR,\ -BULK_TROPOSPHERIC_TEMPERATURE_MEASURE ] - -with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json", "w") as outfile: +data["args1"]=[ + BULK_TROPOSPHERIC_TEMPERATURE_MEASURE, + CWV_BIN_WIDTH, + CWV_RANGE_MAX, + T_RANGE_MIN, + T_RANGE_MAX, + T_BIN_WIDTH, + Q_RANGE_MIN, + Q_RANGE_MAX, + Q_BIN_WIDTH, + NUMBER_OF_REGIONS, + pr_list, + PR_VAR, + prw_list, + PRW_VAR, + data["PREPROCESS_TA"], + MODEL_OUTPUT_DIR, + data["qsat_int_list"], + QSAT_INT_VAR, + data["tave_list"], + TAVE_VAR, + ta_list, + TA_VAR, + PRES_VAR, + MODEL, + p_lev_bottom, + p_lev_top, + dp, + time_idx_delta, + data["SAVE_TAVE_QSAT_INT"], + PREPROCESSING_OUTPUT_DIR, + PRECIP_THRESHOLD, + data["BIN_OUTPUT_DIR"], + data["BIN_OUTPUT_FILENAME"], + TIME_VAR, + LAT_VAR, + LON_VAR +] + +data["args2"] = [ + data["bin_output_list"], + TAVE_VAR, + QSAT_INT_VAR, + BULK_TROPOSPHERIC_TEMPERATURE_MEASURE +] + +with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_calc_parameters.json", "w") as outfile: json.dump(data, 
outfile) diff --git a/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py b/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py index ef38c8790..e59b93ca7 100644 --- a/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py +++ b/diagnostics/convective_transition_diag/convecTransBasic_usp_plot.py @@ -13,139 +13,139 @@ import os import glob -with open(os.environ["WK_DIR"]+"/"+"convecTransBasic_calc_parameters.json") as outfile: +with open(os.environ["WORK_DIR"] + "/" + "convecTransBasic_calc_parameters.json") as outfile: bin_data=json.load(outfile) # ====================================================================== # START USER SPECIFIED SECTION # ====================================================================== # Don't plot bins with PDF0 and reg<=NUMBER_OF_REGIONS): + reg = REGION[lon_idx,lat_idx] + if reg > 0 and reg <= NUMBER_OF_REGIONS: cwv_idx=CWV[:,lat_idx,lon_idx] rain=RAIN[:,lat_idx,lon_idx] temp_idx=temp[:,lat_idx,lon_idx] @@ -59,7 +60,8 @@ def convecTransBasic_binTave(lon_idx, CWV_BIN_WIDTH, NUMBER_OF_REGIONS, NUMBER_T # takes arguments and bins by CWV & qsat_int bins @jit(nopython=True) -def convecTransBasic_binQsatInt(lon_idx, NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, REGION, CWV, RAIN, temp, p0, p1, p2, pe): +def convecTransBasic_binQsatInt(lon_idx, NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, REGION, + CWV, RAIN, temp, p0, p1, p2, pe): for lat_idx in numpy.arange(CWV.shape[1]): reg=REGION[lon_idx,lat_idx] if (reg>0 and reg<=NUMBER_OF_REGIONS): @@ -76,20 +78,22 @@ def convecTransBasic_binQsatInt(lon_idx, NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUM # ====================================================================== # generate_region_mask -# generates a map of integer values that correspond to regions using -# the file region_0.25x0.25_costal2.5degExcluded.mat -# in var_data/convective_transition_diag +# generates a map of integer values that correspond to regions using +# the file region_0.25x0.25_costal2.5degExcluded.mat +# in var_data/convective_transition_diag # Currently, there are 4 regions corresponding to ocean-only grid points -# in the Western Pacific (WPac), Eastern Pacific (EPac), -# Atlantic (Atl), and Indian (Ind) Ocean basins +# in the Western Pacific (WPac), Eastern Pacific (EPac), +# Atlantic (Atl), and Indian (Ind) Ocean basins # Coastal regions (within 2.5 degree with respect to sup-norm) are excluded -def generate_region_mask(region_mask_filename, model_netcdf_filename, lat_var, lon_var): - - print(" Generating region mask..."), +def generate_region_mask(region_mask_filename, + model_netcdf_filename, + lat_var, + lon_var): # Load & Pre-process Region Mask - matfile=scipy.io.loadmat(region_mask_filename) + print("Generating region mask...") + matfile = scipy.io.loadmat(region_mask_filename) lat_m=matfile["lat"] lon_m=matfile["lon"] # 0.125~359.875 deg region=matfile["region"] @@ -114,8 +118,8 @@ def generate_region_mask(region_mask_filename, model_netcdf_filename, lat_var, l lat=numpy.asarray(pr_netcdf.variables[lat_var][:],dtype="float") pr_netcdf.close() if lon[lon<0.0].size>0: - lon[lon[lon<0.0]]+=360.0 - lat=lat[numpy.logical_and(lat>=-20.0,lat<=20.0)] + lon[lon[lon<0.0]] += 360.0 + lat=lat[numpy.logical_and(lat >= -20.0, lat <= 20.0)] LAT,LON=numpy.meshgrid(lat,lon,sparse=False,indexing="xy") LAT=numpy.reshape(LAT,(-1,1)) @@ -132,17 +136,17 @@ def generate_region_mask(region_mask_filename, model_netcdf_filename, lat_var, l return REGION # Use the 
following 3 lines for plotting the resulting region mask - #REGION=numpy.reshape(REGION.astype(int),(-1,lat.size)) - #mp.contourf(lon.squeeze(), lat.squeeze(), REGION.T) - #mp.axes().set_aspect('equal') + # REGION=numpy.reshape(REGION.astype(int),(-1,lat.size)) + # mp.contourf(lon.squeeze(), lat.squeeze(), REGION.T) + # mp.axes().set_aspect('equal') # ====================================================================== # convecTransBasic_calcTaveQsatInt -# takes in 3D tropospheric temperature fields and calculates tave & qsat_int +# takes in 3D tropospheric temperature fields and calculates tave & qsat_int # Calculations will be broken up into chunks of time-period corresponding -# to time_idx_delta with a default of 1000 time steps +# to time_idx_delta with a default of 1000 time steps # Definition of column can be changed through p_lev_bottom & p_lev_top, -# but the default filenames for tave & qsat_int do not contain column info +# but the default filenames for tave & qsat_int do not contain column info def convecTransBasic_calcTaveQsatInt(ta_netcdf_filename,TA_VAR,PRES_VAR,MODEL,\ p_lev_bottom,p_lev_top,dp,time_idx_delta,\ @@ -389,35 +393,35 @@ def convecTransBasic_calc_model(REGION,*argsv): LON_VAR = argsv[0] # Pre-process temperature field if necessary - if PREPROCESS_TA==1: + if PREPROCESS_TA == 1: print(" Start pre-processing atmospheric temperature fields...") for li in numpy.arange(len(pr_list)): - convecTransBasic_calcTaveQsatInt(ta_list[li],TA_VAR,PRES_VAR,MODEL,\ - p_lev_bottom,p_lev_top,dp,time_idx_delta,\ - SAVE_TAVE_QSAT_INT,PREPROCESSING_OUTPUT_DIR,\ + convecTransBasic_calcTaveQsatInt(ta_list[li],TA_VAR,PRES_VAR,MODEL, + p_lev_bottom,p_lev_top,dp,time_idx_delta, + SAVE_TAVE_QSAT_INT,PREPROCESSING_OUTPUT_DIR, TAVE_VAR,QSAT_INT_VAR,TIME_VAR,LAT_VAR,LON_VAR) # Re-load file lists for tave & qsat_int - tave_list=sorted(glob.glob(PREPROCESSING_OUTPUT_DIR+"/"+os.environ["tave_file"])) - qsat_int_list=sorted(glob.glob(PREPROCESSING_OUTPUT_DIR+"/"+os.environ["qsat_int_file"])) + tave_list = sorted(glob.glob(PREPROCESSING_OUTPUT_DIR+"/"+os.environ["tave_file"])) + qsat_int_list = sorted(glob.glob(PREPROCESSING_OUTPUT_DIR+"/"+os.environ["qsat_int_file"])) # Allocate Memory for Arrays for Binning Output # Define Bin Centers - cwv_bin_center=numpy.arange(CWV_BIN_WIDTH,CWV_RANGE_MAX+CWV_BIN_WIDTH,CWV_BIN_WIDTH) + cwv_bin_center = numpy.arange(CWV_BIN_WIDTH, CWV_RANGE_MAX + CWV_BIN_WIDTH, CWV_BIN_WIDTH) # Bulk Tropospheric Temperature Measure (1:tave, or 2:qsat_int) - if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: - tave_bin_center=numpy.arange(T_RANGE_MIN,T_RANGE_MAX+T_BIN_WIDTH,T_BIN_WIDTH) + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + tave_bin_center=numpy.arange(T_RANGE_MIN, T_RANGE_MAX+T_BIN_WIDTH, T_BIN_WIDTH) temp_bin_center=tave_bin_center temp_bin_width=T_BIN_WIDTH - elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2: - qsat_int_bin_center=numpy.arange(Q_RANGE_MIN,Q_RANGE_MAX+Q_BIN_WIDTH,Q_BIN_WIDTH) + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + qsat_int_bin_center=numpy.arange(Q_RANGE_MIN, Q_RANGE_MAX+Q_BIN_WIDTH, Q_BIN_WIDTH) temp_bin_center=qsat_int_bin_center temp_bin_width=Q_BIN_WIDTH - NUMBER_CWV_BIN=cwv_bin_center.size - NUMBER_TEMP_BIN=temp_bin_center.size - temp_offset=temp_bin_center[0]-0.5*temp_bin_width + NUMBER_CWV_BIN = cwv_bin_center.size + NUMBER_TEMP_BIN = temp_bin_center.size + temp_offset = temp_bin_center[0]-0.5*temp_bin_width # Allocate Memory for Arrays P0=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_CWV_BIN,NUMBER_TEMP_BIN)) @@ -436,76 +440,75 @@ def 
convecTransBasic_calc_model(REGION,*argsv): pr_netcdf=Dataset(pr_list[li],"r") lat=numpy.asarray(pr_netcdf.variables[LAT_VAR][:],dtype="float") - pr=numpy.squeeze(numpy.asarray(pr_netcdf.variables[PR_VAR][:,:,:],dtype="float")) + pr=numpy.squeeze(numpy.asarray(pr_netcdf.variables[PR_VAR][:, :, :], dtype="float")) pr_netcdf.close() # Units: mm/s --> mm/h pr=pr[:,numpy.logical_and(lat>=-20.0,lat<=20.0),:]*3.6e3 print(" "+pr_list[li]+" Loaded!") - prw_netcdf=Dataset(prw_list[li],"r") - lat=numpy.asarray(prw_netcdf.variables[LAT_VAR][:],dtype="float") - prw=numpy.squeeze(numpy.asarray(prw_netcdf.variables[PRW_VAR][:,:,:],dtype="float")) + prw_netcdf = Dataset(prw_list[li], "r") + lat = numpy.asarray(prw_netcdf.variables[LAT_VAR][:], dtype="float") + prw = numpy.squeeze(numpy.asarray(prw_netcdf.variables[PRW_VAR][:, :, :], dtype="float")) prw_netcdf.close() - prw=prw[:,numpy.logical_and(lat>=-20.0,lat<=20.0),:] + prw = prw[:, numpy.logical_and(lat >= -20.0, lat <= 20.0), :] print(" "+prw_list[li]+" Loaded!") qsat_int_netcdf=Dataset(qsat_int_list[li],"r") - lat=numpy.asarray(qsat_int_netcdf.variables[LAT_VAR][:],dtype="float") - qsat_int=numpy.squeeze(numpy.asarray(qsat_int_netcdf.variables[QSAT_INT_VAR][:,:,:],dtype="float")) + lat=numpy.asarray(qsat_int_netcdf.variables[LAT_VAR][:], dtype="float") + qsat_int=numpy.squeeze(numpy.asarray(qsat_int_netcdf.variables[QSAT_INT_VAR][:, :, :], dtype="float")) qsat_int_netcdf.close() - qsat_int=qsat_int[:,numpy.logical_and(lat>=-20.0,lat<=20.0),:] + qsat_int=qsat_int[:,numpy.logical_and(lat>=-20.0, lat<=20.0),:] print(" "+qsat_int_list[li]+" Loaded!") - if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: - tave_netcdf=Dataset(tave_list[li],"r") - lat=numpy.asarray(tave_netcdf.variables[LAT_VAR][:],dtype="float") - tave=numpy.squeeze(numpy.asarray(tave_netcdf.variables[TAVE_VAR][:,:,:],dtype="float")) + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + tave_netcdf=Dataset(tave_list[li], "r") + lat=numpy.asarray(tave_netcdf.variables[LAT_VAR][:], dtype="float") + tave=numpy.squeeze(numpy.asarray(tave_netcdf.variables[TAVE_VAR][:, :, :], dtype="float")) tave_netcdf.close() - tave=tave[:,numpy.logical_and(lat>=-20.0,lat<=20.0),:] + tave=tave[:, numpy.logical_and(lat>=-20.0, lat<=20.0),:] print(" "+tave_list[li]+" Loaded!") print(" Binning..."), - - ### Start binning + + # Start binning CWV=prw/CWV_BIN_WIDTH-0.5 CWV=CWV.astype(int) RAIN=pr - RAIN[RAIN<0]=0 # Sometimes models produce negative rain rates - QSAT_INT=qsat_int - if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: - TAVE=tave - temp=(TAVE-temp_offset)/temp_bin_width - elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2: - temp=(QSAT_INT-temp_offset)/temp_bin_width - temp=temp.astype(int) + RAIN[RAIN<0] = 0 # Sometimes models produce negative rain rates + QSAT_INT = qsat_int + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + TAVE = tave + temp = (TAVE-temp_offset)/temp_bin_width + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + temp = (QSAT_INT-temp_offset)/temp_bin_width + temp = temp.astype(int) # Binning is structured in the following way to avoid potential round-off issue # (an issue arise when the total number of events reaches about 1e+8) for lon_idx in numpy.arange(CWV.shape[2]): - p0=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_CWV_BIN,NUMBER_TEMP_BIN)) - p1=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_CWV_BIN,NUMBER_TEMP_BIN)) - p2=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_CWV_BIN,NUMBER_TEMP_BIN)) - pe=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_CWV_BIN,NUMBER_TEMP_BIN)) - if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: - 
q0=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_TEMP_BIN)) - q1=numpy.zeros((NUMBER_OF_REGIONS,NUMBER_TEMP_BIN)) - convecTransBasic_binTave(lon_idx, CWV_BIN_WIDTH, \ - NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, \ - REGION, CWV, RAIN, temp, QSAT_INT, \ + p0=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_CWV_BIN, NUMBER_TEMP_BIN)) + p1=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_CWV_BIN, NUMBER_TEMP_BIN)) + p2=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_CWV_BIN, NUMBER_TEMP_BIN)) + pe=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_CWV_BIN, NUMBER_TEMP_BIN)) + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + q0=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_TEMP_BIN)) + q1=numpy.zeros((NUMBER_OF_REGIONS, NUMBER_TEMP_BIN)) + convecTransBasic_binTave(lon_idx, CWV_BIN_WIDTH, + NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, + REGION, CWV, RAIN, temp, QSAT_INT, p0, p1, p2, pe, q0, q1) - elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2: - convecTransBasic_binQsatInt(lon_idx, \ - NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, \ - REGION, CWV, RAIN, temp, \ - p0, p1, p2, pe) - P0+=p0 - P1+=p1 - P2+=p2 - PE+=pe - if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + convecTransBasic_binQsatInt(lon_idx, + NUMBER_OF_REGIONS, NUMBER_TEMP_BIN, NUMBER_CWV_BIN, PRECIP_THRESHOLD, + REGION, CWV, RAIN, temp, p0, p1, p2, pe) + P0 += p0 + P1 += p1 + P2 += p2 + PE += pe + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: Q0+=q0 Q1+=q1 # end-for lon_idx @@ -515,23 +518,23 @@ def convecTransBasic_calc_model(REGION,*argsv): print(" Total binning complete!") # Save Binning Results - bin_output_netcdf=Dataset(BIN_OUTPUT_DIR+"/"+BIN_OUTPUT_FILENAME+".nc","w",format="NETCDF4") + bin_output_netcdf = Dataset(BIN_OUTPUT_DIR + "/" + BIN_OUTPUT_FILENAME+".nc", "w", format="NETCDF4") - bin_output_netcdf.description="Convective Onset Statistics for "+MODEL - bin_output_netcdf.source="Convective Onset Statistics Diagnostic Package \ + bin_output_netcdf.description = "Convective Onset Statistics for "+MODEL + bin_output_netcdf.source = "Convective Onset Statistics Diagnostic Package \ - as part of the NOAA Model Diagnostic Task Force (MDTF) effort" - bin_output_netcdf.PRECIP_THRESHOLD=PRECIP_THRESHOLD + bin_output_netcdf.PRECIP_THRESHOLD = PRECIP_THRESHOLD - region=bin_output_netcdf.createDimension("region",NUMBER_OF_REGIONS) - reg=bin_output_netcdf.createVariable("region",numpy.float64,("region",)) - reg=numpy.arange(1,NUMBER_OF_REGIONS+1) + region = bin_output_netcdf.createDimension("region", NUMBER_OF_REGIONS) + reg = bin_output_netcdf.createVariable("region", numpy.float64, ("region",)) + reg = numpy.arange(1,NUMBER_OF_REGIONS+1) - cwv=bin_output_netcdf.createDimension("cwv",len(cwv_bin_center)) - prw=bin_output_netcdf.createVariable("cwv",numpy.float64,("cwv",)) + cwv = bin_output_netcdf.createDimension("cwv", len(cwv_bin_center)) + prw=bin_output_netcdf.createVariable("cwv", numpy.float64, ("cwv", )) prw.units="mm" - prw[:]=cwv_bin_center + prw[:] = cwv_bin_center - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: tave=bin_output_netcdf.createDimension(TAVE_VAR,len(tave_bin_center)) temp=bin_output_netcdf.createVariable(TAVE_VAR,numpy.float64,(TAVE_VAR,)) temp.units="K" @@ -558,52 +561,52 @@ def convecTransBasic_calc_model(REGION,*argsv): q1.units="mm" q1[:,:]=Q1 - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - qsat_int=bin_output_netcdf.createDimension(QSAT_INT_VAR,len(qsat_int_bin_center)) - 
temp=bin_output_netcdf.createVariable(QSAT_INT_VAR,numpy.float64,(QSAT_INT_VAR,)) - temp.units="mm" - temp[:]=qsat_int_bin_center + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + qsat_int = bin_output_netcdf.createDimension(QSAT_INT_VAR, len(qsat_int_bin_center)) + temp=bin_output_netcdf.createVariable(QSAT_INT_VAR, numpy.float64, (QSAT_INT_VAR,)) + temp.units = "mm" + temp[:] = qsat_int_bin_center - p0=bin_output_netcdf.createVariable("P0",numpy.float64,("region","cwv",QSAT_INT_VAR)) - p0[:,:,:]=P0 + p0 = bin_output_netcdf.createVariable("P0", numpy.float64, ("region", "cwv", QSAT_INT_VAR)) + p0[:,:,:] = P0 - p1=bin_output_netcdf.createVariable("P1",numpy.float64,("region","cwv",QSAT_INT_VAR)) - p1.units="mm/h" - p1[:,:,:]=P1 + p1 = bin_output_netcdf.createVariable("P1", numpy.float64, ("region", "cwv", QSAT_INT_VAR)) + p1.units = "mm/h" + p1[:, :, :] = P1 - p2=bin_output_netcdf.createVariable("P2",numpy.float64,("region","cwv",QSAT_INT_VAR)) + p2=bin_output_netcdf.createVariable("P2", numpy.float64, ("region", "cwv", QSAT_INT_VAR)) p2.units="mm^2/h^2" - p2[:,:,:]=P2 + p2[:,:,:] = P2 - pe=bin_output_netcdf.createVariable("PE",numpy.float64,("region","cwv",QSAT_INT_VAR)) - pe[:,:,:]=PE + pe=bin_output_netcdf.createVariable("PE", numpy.float64, ("region", "cwv", QSAT_INT_VAR)) + pe[:,:,:] = PE bin_output_netcdf.close() print(" Binned results saved as "+BIN_OUTPUT_DIR+"/"+BIN_OUTPUT_FILENAME+".nc!") - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - return cwv_bin_center,tave_bin_center,P0,P1,P2,PE,Q0,Q1,CWV_BIN_WIDTH,PRECIP_THRESHOLD - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - return cwv_bin_center,qsat_int_bin_center,P0,P1,P2,PE,[],[],CWV_BIN_WIDTH,PRECIP_THRESHOLD + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + return cwv_bin_center, tave_bin_center, P0, P1, P2, PE, Q0, Q1, CWV_BIN_WIDTH, PRECIP_THRESHOLD + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + return cwv_bin_center, qsat_int_bin_center, P0, P1, P2, PE, [], [], CWV_BIN_WIDTH, PRECIP_THRESHOLD # ====================================================================== # convecTransBasic_loadAnalyzedData # loads the binned output calculated from convecTransBasic_calc_model # and prepares it for plotting -def convecTransBasic_loadAnalyzedData(*argsv): - bin_output_list,\ - TAVE_VAR,\ - QSAT_INT_VAR,\ - BULK_TROPOSPHERIC_TEMPERATURE_MEASURE=argsv[0] +def convecTransBasic_loadAnalyzedData(*argsv): + bin_output_list, \ + TAVE_VAR, \ + QSAT_INT_VAR, \ + BULK_TROPOSPHERIC_TEMPERATURE_MEASURE = argsv[0] - if (len(bin_output_list)!=0): + if len(bin_output_list) != 0: bin_output_filename=bin_output_list[0] - if bin_output_filename.split('.')[-1]=='nc': - bin_output_netcdf=Dataset(bin_output_filename,"r") + if bin_output_filename.split('.')[-1] == 'nc': + bin_output_netcdf=Dataset(bin_output_filename, "r") cwv_bin_center=numpy.asarray(bin_output_netcdf.variables["cwv"][:],dtype="float") P0=numpy.asarray(bin_output_netcdf.variables["P0"][:,:,:],dtype="float") @@ -645,8 +648,13 @@ def convecTransBasic_loadAnalyzedData(*argsv): # user-specified parameters are consistent with existing data return cwv_bin_center,temp_bin_center,P0,P1,P2,PE,Q0,Q1,CWV_BIN_WIDTH,PRECIP_THRESHOLD - else: # If the binned model/obs data does not exist (in practive, for obs data only) - return numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]),numpy.array([]) + else: # If the binned model/obs data does not exist (in practice, for obs data only) + return 
(numpy.array([]), numpy.array([]), + numpy.array([]), numpy.array([]), + numpy.array([]), numpy.array([]), + numpy.array([]), numpy.array([]), + numpy.array([]), numpy.array([]) + ) # ====================================================================== # convecTransBasic_plot @@ -670,7 +678,7 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): PT=ret # Load plotting parameters from convecTransBasic_usp_plot.py - fig_params=argsv1 + fig_params = argsv1 # Load parameters from convecTransBasic_usp_calc.py # Checking CWV_BIN_WIDTH & PRECIP_THRESHOLD @@ -710,25 +718,25 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): # Check whether the detected binned MODEL data is consistent with User-Specified Parameters # (Not all parameters, just 3) - if (CBW!=CWV_BIN_WIDTH): - print("==> Caution! The detected binned output has a CWV_BIN_WIDTH value "\ - +"different from the value specified in convecTransBasic_usp_calc.py!") - if (PT!=PRECIP_THRESHOLD): - print("==> Caution! The detected binned output has a PRECIP_THRESHOLD value "\ - +"different from the value specified in convecTransBasic_usp_calc.py!") - if (P0.shape[0]!=NUMBER_OF_REGIONS): - print("==> Caution! The detected binned output has a NUMBER_OF_REGIONS "\ - +"different from the value specified in convecTransBasic_usp_calc.py!") - if (CBW!=CWV_BIN_WIDTH or PT!=PRECIP_THRESHOLD or P0.shape[0]!=NUMBER_OF_REGIONS): - print("Caution! The detected binned output is inconsistent with "\ - +"User-Specified Parameter(s) defined in convecTransBasic_usp_calc.py!") - print(" Please double-check convecTransBasic_usp_calc.py, "\ - +"or if the required MODEL output exist, set BIN_ANYWAY=True "\ - +"in convecTransBasic_usp_calc.py!") - - ### Process/Plot binned OBS data - # if the binned OBS data exists, checking by P0_obs==[] - if (P0_obs.size!=0): + if CBW != CWV_BIN_WIDTH: + print("==> Caution! The detected binned output has a CWV_BIN_WIDTH value " + \ + "different from the value specified in convecTransBasic_usp_calc.py!") + if PT != PRECIP_THRESHOLD: + print("==> Caution! The detected binned output has a PRECIP_THRESHOLD value " + \ + "different from the value specified in convecTransBasic_usp_calc.py!") + if P0.shape[0] != NUMBER_OF_REGIONS: + print("==> Caution! The detected binned output has a NUMBER_OF_REGIONS " + \ + "different from the value specified in convecTransBasic_usp_calc.py!") + if CBW != CWV_BIN_WIDTH or PT != PRECIP_THRESHOLD or P0.shape[0] != NUMBER_OF_REGIONS: + print("Caution! 
The detected binned output is inconsistent with " + \ + "User-Specified Parameter(s) defined in convecTransBasic_usp_calc.py!") + print("Please double-check convecTransBasic_usp_calc.py, " + \ + "or if the required MODEL output exists, set BIN_ANYWAY=True " + \ + "in convecTransBasic_usp_calc.py!") + + # Process/Plot binned OBS data + # if the binned OBS data exists, checking by P0_obs==[] + if P0_obs.size != 0: # Post-binning Processing before Plotting P0_obs[P0_obs==0.0]=numpy.nan P_obs=P1_obs/P0_obs @@ -810,10 +818,10 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): print(" Plotting OBS Figure..."), # create figure canvas - fig_obs = mp.figure(figsize=(figsize1,figsize2)) + fig_obs = mp.figure(figsize=(figsize1, figsize2)) - #title_text=fig_obs.text(s='Convective Transition Basic Statistics ('+OBS+', '+RES+'$^{\circ}$)', x=0.5, y=1.02, ha='center', va='bottom', transform=fig_obs.transFigure, fontsize=16) - fig_obs.suptitle('Convective Transition Basic Statistics ('+OBS+', '+RES+'$^{\circ}$)', y=1.04, fontsize=16) ###Change y=1.04 to 1.02 for Python3. + fig_obs.suptitle('Convective Transition Basic Statistics ('+OBS+', '+RES+'$^{\circ}$)', + y=1.04, fontsize=16) #Change y=1.04 to 1.02 for Python3. for reg in numpy.arange(NUMBER_OF_REGIONS): # create figure 1 @@ -839,14 +847,15 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs+1): if t_reg_I_obs[reg,Tidx]: if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax1.scatter(Q1_obs[reg,Tidx]/Q0_obs[reg,Tidx],fig_params['f1'][1][1]*0.98,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ - s=marker_size,clip_on=True,zorder=3,marker="^",\ + ax1.scatter(Q1_obs[reg,Tidx]/Q0_obs[reg,Tidx],fig_params['f1'][1][1]*0.98, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], + s=marker_size,clip_on=True,zorder=3,marker="^", label=': $\widehat{q_{sat}}$ (Column-integrated Saturation Specific Humidity)') elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax1.scatter(temp_bin_center_obs[Tidx],fig_params['f1'][1][1]*0.98,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ - s=marker_size,clip_on=True,zorder=3,marker="^",\ + ax1.scatter(temp_bin_center_obs[Tidx],fig_params['f1'][1][1]*0.98, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2, + facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], + s=marker_size,clip_on=True,zorder=3,marker="^", label=': $\widehat{q_{sat}}$ (Column-integrated Saturation Specific Humidity)') ax1.set_xlabel(fig_params['f1'][2], fontsize=axes_fontsize) ax1.set_ylabel(fig_params['f1'][3], fontsize=axes_fontsize) @@ -855,14 +864,15 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): handles, labels = ax1.get_legend_handles_labels() num_handles = sum(t_reg_I_obs[reg,:]) - leg = ax1.legend(handles[0:num_handles], labels[0:num_handles], fontsize=axes_fontsize, bbox_to_anchor=(0.05,0.95), \ - bbox_transform=ax1.transAxes, loc="upper left", borderaxespad=0, labelspacing=0.1, \ - fancybox=False,scatterpoints=1, framealpha=0, borderpad=0, \ - handletextpad=0.1, markerscale=1, ncol=1, columnspacing=0.25) + leg = ax1.legend(handles[0:num_handles], labels[0:num_handles], fontsize=axes_fontsize, + bbox_to_anchor=(0.05, 0.95), + bbox_transform=ax1.transAxes, loc="upper left", borderaxespad=0, labelspacing=0.1, + fancybox=False,scatterpoints=1, framealpha=0, borderpad=0, + handletextpad=0.1, markerscale=1, ncol=1, columnspacing=0.25) 
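For orientation in the plotting code that follows: everything plotted here derives from the binned arrays produced by convecTransBasic_calc_model, where P0 counts all events per (region, CWV, temperature) bin, P1 and P2 accumulate the first and second moments of precipitation, and PE counts precipitating events. A sketch of the post-binning arithmetic, mirroring the P0_obs[P0_obs==0.0]=numpy.nan masking above; the variance and PDF lines are inferred from the array definitions rather than copied from the POD:

import numpy

def post_bin_stats(P0, P1, P2, PE):
    # Mask empty bins so the divisions yield NaN instead of raising warnings.
    P0 = numpy.where(P0 == 0.0, numpy.nan, P0)
    cond_mean = P1 / P0                     # conditional average precipitation
    cond_var = P2 / P0 - cond_mean ** 2     # conditional variance (second moment)
    cond_prob = PE / P0                     # conditional probability of precipitating
    pdf = P0 / numpy.nansum(P0)             # normalized PDF of all events
    return cond_mean, cond_var, cond_prob, pdf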
ax1.add_artist(leg) if reg==0: - #ax1_text = ax1.text(s='Precip. cond. avg. on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, ha='center', va='bottom') - ax1.text(s='Precip. cond. avg. on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, ha='center', va='bottom') + ax1.text(s='Precip. cond. avg. on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, + ha='center', va='bottom') # create figure 2 (probability pickup) ax2 = fig_obs.add_subplot(NUMBER_OF_REGIONS,4,2+reg*NUMBER_OF_REGIONS) @@ -872,29 +882,32 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): ax2.set_yticks(fig_params['f2'][5]) ax2.tick_params(labelsize=axes_fontsize) ax2.tick_params(axis="x", pad=xtick_pad) - for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs+1): - if t_reg_I_obs[reg,Tidx]: - ax2.scatter(cwv_bin_center_obs,cp_obs[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ - s=marker_size,clip_on=True,zorder=3) - for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs+1): + for Tidx in numpy.arange(TEMP_MIN_obs, TEMP_MAX_obs + 1): if t_reg_I_obs[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax2.scatter(Q1_obs[reg,Tidx]/Q0_obs[reg,Tidx],fig_params['f2'][1][1]*0.98,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ - s=marker_size,clip_on=True,zorder=3,marker="^") - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax2.scatter(temp_bin_center_obs[Tidx],fig_params['f2'][1][1]*0.98,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ + ax2.scatter(cwv_bin_center_obs,cp_obs[reg, :, Tidx], + edgecolor="none",facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], + s=marker_size, clip_on=True,zorder=3) + for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs + 1): + if t_reg_I_obs[reg, Tidx]: + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax2.scatter(Q1_obs[reg, Tidx]/Q0_obs[reg, Tidx], fig_params['f2'][1][1]*0.98, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2, + facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], s=marker_size,clip_on=True,zorder=3,marker="^") + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax2.scatter(temp_bin_center_obs[Tidx],fig_params['f2'][1][1]*0.98, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2, + facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], + s=marker_size, clip_on=True, zorder=3, marker="^") ax2.set_xlabel(fig_params['f2'][2], fontsize=axes_fontsize) ax2.set_ylabel(fig_params['f2'][3], fontsize=axes_fontsize) - ax2.text(0.05, 0.95, REGION_STR_OBS[reg], transform=ax2.transAxes, fontsize=12, fontweight="bold", verticalalignment="top") + ax2.text(0.05, 0.95, REGION_STR_OBS[reg], transform=ax2.transAxes, fontsize=12, fontweight="bold", + verticalalignment="top") ax2.grid() ax2.set_axisbelow(True) if reg==0: - #ax2_text = ax2.text(s='Prob. of Precip.>'+str(PT_obs)+'mm/h', x=0.5, y=1.05, transform=ax2.transAxes, fontsize=12, ha='center', va='bottom') - ax2.text(s='Prob. of Precip.>'+str(PT_obs)+'mm/h', x=0.5, y=1.05, transform=ax2.transAxes, fontsize=12, ha='center', va='bottom') + ax2.text(s='Prob. 
of Precip.>' + str(PT_obs) + 'mm/h', x=0.5, y=1.05, transform=ax2.transAxes, + fontsize=12, ha='center', va='bottom') # create figure 3 (normalized PDF) ax3 = fig_obs.add_subplot(NUMBER_OF_REGIONS,4,3+reg*NUMBER_OF_REGIONS) @@ -904,27 +917,28 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): ax3.set_xticks(fig_params['f3'][4]) ax3.tick_params(labelsize=axes_fontsize) ax3.tick_params(axis="x", pad=xtick_pad) + for Tidx in numpy.arange(TEMP_MIN_obs, TEMP_MAX_obs + 1): + if t_reg_I_obs[reg, Tidx]: + ax3.scatter(cwv_bin_center_obs,PDF_obs[reg, :, Tidx], + edgecolor="none",facecolor=scatter_colors[Tidx-TEMP_MIN_obs, :], + s=marker_size, clip_on=True, zorder=3) for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs+1): if t_reg_I_obs[reg,Tidx]: - ax3.scatter(cwv_bin_center_obs,PDF_obs[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ - s=marker_size,clip_on=True,zorder=3) - for Tidx in numpy.arange(TEMP_MIN_obs,TEMP_MAX_obs+1): - if t_reg_I_obs[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax3.scatter(Q1_obs[reg,Tidx]/Q0_obs[reg,Tidx],fig_params['f3'][1][1]*0.83,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax3.scatter(Q1_obs[reg,Tidx]/Q0_obs[reg,Tidx],fig_params['f3'][1][1]*0.83, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2, + facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], s=marker_size,clip_on=True,zorder=3,marker="^") - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax3.scatter(temp_bin_center_obs[Tidx],fig_params['f3'][1][1]*0.83,\ - edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2,facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:],\ + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax3.scatter(temp_bin_center_obs[Tidx],fig_params['f3'][1][1]*0.83, + edgecolor=scatter_colors[Tidx-TEMP_MIN_obs,:]/2, + facecolor=scatter_colors[Tidx-TEMP_MIN_obs,:], s=marker_size,clip_on=True,zorder=3,marker="^") ax3.set_xlabel(fig_params['f3'][2], fontsize=axes_fontsize) ax3.set_ylabel(fig_params['f3'][3], fontsize=axes_fontsize) ax3.grid() ax3.set_axisbelow(True) if reg==0: - #ax3_text = ax3.text(s='PDF of CWV', x=0.5, y=1.05, transform=ax3.transAxes, fontsize=12, ha='center', va='bottom') ax3.text(s='PDF of CWV', x=0.5, y=1.05, transform=ax3.transAxes, fontsize=12, ha='center', va='bottom') # create figure 4 (normalized PDF - precipitation) @@ -952,20 +966,24 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): s=marker_size,clip_on=True,zorder=3,marker="^") ax4.set_xlabel(fig_params['f4'][2], fontsize=axes_fontsize) ax4.set_ylabel(fig_params['f4'][3], fontsize=axes_fontsize) - ax4.text(0.05, 0.95, "Precip > "+str(PT_obs)+" mm h$^-$$^1$" , transform=ax4.transAxes, fontsize=12, verticalalignment="top") + ax4.text(0.05, 0.95, "Precip > "+str(PT_obs)+" mm h$^-$$^1$", + transform=ax4.transAxes, fontsize=12, verticalalignment="top") ax4.grid() ax4.set_axisbelow(True) - if reg==0: - #ax4_text = ax4.text(s='PDF of CWV for Precip.>'+str(PT_obs)+'mm/h', x=0.49, y=1.05, transform=ax4.transAxes, fontsize=12, ha='center', va='bottom') - ax4.text(s='PDF of CWV for Precip.>'+str(PT_obs)+'mm/h', x=0.49, y=1.05, transform=ax4.transAxes, fontsize=12, ha='center', va='bottom') - - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - temp_str='$\widehat{T}$ (1000-200hPa Mass-weighted Column Average Temperature) used as the bulk tropospheric temperature measure' - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - temp_str='$\widehat{q_{sat}}$ (1000-200hPa 
Column-integrated Saturation Specific Humidity) used as the bulk tropospheric temperature measure' + if reg == 0: + ax4.text(s='PDF of CWV for Precip.>'+str(PT_obs)+'mm/h', x=0.49, y=1.05, + transform=ax4.transAxes, fontsize=12, ha='center', va='bottom') + + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + temp_str = '$\widehat{T}$ (1000-200hPa Mass-weighted Column Average Temperature)'\ + ' used as the bulk tropospheric temperature measure' + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + temp_str = '$\widehat{q_{sat}}$ (1000-200hPa Column-integrated Saturation Specific Humidity)'\ + ' used as the bulk tropospheric temperature measure' fig_obs.text(s=temp_str, x=0, y=0, ha='left', va='top', transform=fig_obs.transFigure, fontsize=12) - triag_qsat_str = '$\Delta$: $\widehat{q_{sat}}$ (1000-200hPa Column-integrated Saturation Specific Humidity; Units: mm)' + triag_qsat_str = '$\Delta$: $\widehat{q_{sat}}$ (1000-200hPa Column-integrated Saturation Specific Humidity;'\ + ' Units: mm)' fig_obs.text(s=triag_qsat_str, x=0, y=-0.02, ha='left', va='top', transform=fig_obs.transFigure, fontsize=12) # set layout to tight (so that space between figures is minimized) @@ -977,10 +995,10 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): # ====================================================================== # =======================End Plot OBS Binned Data======================= # ====================================================================== - ### End of Process/Plot binned OBS data + # End of Process/Plot binned OBS data # Post-binning Processing before Plotting - P0[P0==0.0]=numpy.nan + P0[P0 == 0.0] = numpy.nan P=P1/P0 CP=PE/P0 PDF=numpy.zeros(P0.shape) @@ -992,7 +1010,7 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): pdf_gt_th[PDF>PDF_THRESHOLD]=1 # Indicator of (temp,reg) with wide CWV range - t_reg_I=(numpy.squeeze(numpy.sum(pdf_gt_th,axis=1))*CBW>CWV_RANGE_THRESHOLD) + t_reg_I = (numpy.squeeze(numpy.sum(pdf_gt_th,axis=1))*CBW>CWV_RANGE_THRESHOLD) ### Connected Component Section # The CWV_RANGE_THRESHOLD-Criterion must be satisfied by a connected component @@ -1017,18 +1035,18 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): # else: # t_reg_I[reg,Tidx]=False # #pdf_gt_th[reg,:,Tidx]=0 - ### End of Connected Component Section +# End of Connected Component Section # Copy P1, CP into p1, cp for (temp,reg) with "wide CWV range" & "large PDF" p1=numpy.zeros(P1.shape) cp=numpy.zeros(CP.shape) for reg in numpy.arange(P1.shape[0]): for Tidx in numpy.arange(P1.shape[2]): - if t_reg_I[reg,Tidx]: - p1[reg,:,Tidx]=numpy.copy(P[reg,:,Tidx]) - cp[reg,:,Tidx]=numpy.copy(CP[reg,:,Tidx]) - p1[pdf_gt_th==0]=numpy.nan - cp[pdf_gt_th==0]=numpy.nan + if t_reg_I[reg, Tidx]: + p1[reg, :,Tidx]=numpy.copy(P[reg, :, Tidx]) + cp[reg, :, Tidx]=numpy.copy(CP[reg, :, Tidx]) + p1[pdf_gt_th == 0] = numpy.nan + cp[pdf_gt_th == 0] = numpy.nan pdf=numpy.copy(PDF) for reg in numpy.arange(P1.shape[0]): @@ -1037,21 +1055,21 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): if (numpy.max(cp[reg,:,Tidx][cp[reg,:,Tidx]>=0])<CP_THRESHOLD): t_reg_I[reg,Tidx]=False - TEMP_MIN=numpy.where(numpy.sum(t_reg_I,axis=0)>=1)[0][0] - TEMP_MAX=numpy.where(numpy.sum(t_reg_I,axis=0)>=1)[0][-1] + TEMP_MIN = numpy.where(numpy.sum(t_reg_I, axis=0) >= 1)[0][0] + TEMP_MAX = numpy.where(numpy.sum(t_reg_I, axis=0) >= 1)[0][-1] # Use OBS to set colormap (but if they don't exist or users don't want to...)
- if (P0_obs.size==0 or USE_SAME_COLOR_MAP==False): + if P0_obs.size == 0 or not USE_SAME_COLOR_MAP: TEMP_MIN_obs=TEMP_MIN TEMP_MAX_obs=TEMP_MAX @@ -1063,54 +1081,53 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): axes_fontsize,legend_fonsize,marker_size,xtick_pad,figsize1,figsize2 = fig_params['f0'] - print(" Plotting MODEL Figure..."), + print("Plotting MODEL Figure..."), # create figure canvas - fig = mp.figure(figsize=(figsize1,figsize2)) - - #title_text=fig.text(s='Convective Transition Basic Statistics ('+MODEL+')', x=0.5, y=1.02, ha='center', va='bottom', transform=fig.transFigure, fontsize=16) - fig.suptitle('Convective Transition Basic Statistics ('+MODEL+')', y=1.04, fontsize=16) ###Change y=1.04 to 1.02 for Python3. + fig = mp.figure(figsize=(figsize1, figsize2)) + fig.suptitle('Convective Transition Basic Statistics (' + MODEL + ')', y=1.04, fontsize=16) for reg in numpy.arange(NUMBER_OF_REGIONS): # create figure 1 - ax1 = fig.add_subplot(NUMBER_OF_REGIONS,4,1+reg*NUMBER_OF_REGIONS) + ax1 = fig.add_subplot(NUMBER_OF_REGIONS, 4, 1 + reg*NUMBER_OF_REGIONS) ax1.set_xlim(fig_params['f1'][0]) ax1.set_ylim(fig_params['f1'][1]) ax1.set_xticks(fig_params['f1'][4]) ax1.set_yticks(fig_params['f1'][5]) ax1.tick_params(labelsize=axes_fontsize) ax1.tick_params(axis="x", pad=10) - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX+1): if t_reg_I[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax1.scatter(cwv_bin_center,p1[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=3,\ + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax1.scatter(cwv_bin_center,p1[reg, :, Tidx], + edgecolor="none", facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size, clip_on=True, zorder=3, label="{:.0f}".format(temp_bin_center[Tidx])) - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax1.scatter(cwv_bin_center,p1[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=3,\ + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax1.scatter(cwv_bin_center, p1[reg, :, Tidx], + edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size, clip_on=True, zorder=3, label="{:.1f}".format(temp_bin_center[Tidx])) - for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN),max(TEMP_MAX_obs+1,TEMP_MAX+1)): - if (OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and \ - P0_obs.size!=0 and t_reg_I_obs[reg,Tidx]): - ax1.scatter(cwv_bin_center_obs,p1_obs[reg,:,Tidx],\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,\ - facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size/5,clip_on=True,zorder=3,\ - label='Statistics for '+OBS+' (spatial resolution: '+RES+'$^{\circ}$)') - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): - if t_reg_I[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax1.scatter(Q1[reg,Tidx]/Q0[reg,Tidx],fig_params['f1'][1][1]*0.98,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^",\ + for Tidx in numpy.arange(min(TEMP_MIN_obs, TEMP_MIN), max(TEMP_MAX_obs+1, TEMP_MAX+1)): + if OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size != 0 and t_reg_I_obs[reg, Tidx]: + ax1.scatter(cwv_bin_center_obs,p1_obs[reg,:,Tidx], + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:], + s=marker_size/5, clip_on=True, 
zorder=3, + label='Statistics for ' + OBS + ' (spatial resolution: ' + RES+ '$^{\circ}$)') + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX+1): + if t_reg_I[reg, Tidx]: + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax1.scatter(Q1[reg,Tidx]/Q0[reg, Tidx], fig_params['f1'][1][1]*0.98, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:], + s=marker_size,clip_on=True,zorder=4,marker="^", label=': $\widehat{q_{sat}}$ (Column-integrated Saturation Specific Humidity)') elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax1.scatter(temp_bin_center[Tidx],fig_params['f1'][1][1]*0.98,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^",\ + ax1.scatter(temp_bin_center[Tidx],fig_params['f1'][1][1]*0.98, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:], + s=marker_size,clip_on=True, zorder=4, marker="^", label=': $\widehat{q_{sat}}$ (Column-integrated Saturation Specific Humidity)') ax1.set_xlabel(fig_params['f1'][2], fontsize=axes_fontsize) ax1.set_ylabel(fig_params['f1'][3], fontsize=axes_fontsize) @@ -1119,14 +1136,15 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): handles, labels = ax1.get_legend_handles_labels() num_handles = sum(t_reg_I[reg,:]) - leg = ax1.legend(handles[0:num_handles], labels[0:num_handles], fontsize=axes_fontsize, bbox_to_anchor=(0.05,0.95), \ - bbox_transform=ax1.transAxes, loc="upper left", borderaxespad=0, labelspacing=0.1, \ - fancybox=False, scatterpoints=1, framealpha=0, borderpad=0, \ - handletextpad=0.1, markerscale=1, ncol=1, columnspacing=0.25) + leg = ax1.legend(handles[0:num_handles], labels[0:num_handles], fontsize=axes_fontsize, + bbox_to_anchor=(0.05,0.95), + bbox_transform=ax1.transAxes, loc="upper left", borderaxespad=0, labelspacing=0.1, + fancybox=False, scatterpoints=1, framealpha=0, borderpad=0, + handletextpad=0.1, markerscale=1, ncol=1, columnspacing=0.25) ax1.add_artist(leg) if reg==0: - #ax1_text = ax1.text(s='Precip. cond. avg. on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, ha='center', va='bottom') - ax1.text(s='Precip. cond. avg. on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, ha='center', va='bottom') + ax1.text(s='Precip. cond. avg. 
on CWV', x=0.5, y=1.05, transform=ax1.transAxes, fontsize=12, + ha='center', va='bottom') # create figure 2 (probability pickup) ax2 = fig.add_subplot(NUMBER_OF_REGIONS,4,2+reg*NUMBER_OF_REGIONS) @@ -1137,34 +1155,37 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): ax2.tick_params(labelsize=axes_fontsize) ax2.tick_params(axis="x", pad=xtick_pad) for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): - if t_reg_I[reg,Tidx]: - ax2.scatter(cwv_bin_center,cp[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ + if t_reg_I[reg, Tidx]: + ax2.scatter(cwv_bin_center,cp[reg,:,Tidx], + edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], s=marker_size,clip_on=True,zorder=3) - for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN),max(TEMP_MAX_obs+1,TEMP_MAX+1)): - if (OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and \ - P0_obs.size!=0 and t_reg_I_obs[reg,Tidx]): - ax2.scatter(cwv_bin_center_obs,cp_obs[reg,:,Tidx],\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,\ - facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size/5,clip_on=True,zorder=3) - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): - if t_reg_I[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax2.scatter(Q1[reg,Tidx]/Q0[reg,Tidx],fig_params['f2'][1][1]*0.98,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^") - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax2.scatter(temp_bin_center[Tidx],fig_params['f2'][1][1]*0.98,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ + for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN), max(TEMP_MAX_obs+1,TEMP_MAX+1)): + if OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size != 0 and t_reg_I_obs[reg, Tidx]: + ax2.scatter(cwv_bin_center_obs, cp_obs[reg,:,Tidx], + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size/5, clip_on=True, zorder=3) + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX+1): + if t_reg_I[reg, Tidx]: + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax2.scatter(Q1[reg,Tidx]/Q0[reg,Tidx], fig_params['f2'][1][1]*0.98, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], s=marker_size,clip_on=True,zorder=4,marker="^") + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax2.scatter(temp_bin_center[Tidx], fig_params['f2'][1][1]*0.98, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:], + s=marker_size,clip_on=True, zorder=4, marker="^") ax2.set_xlabel(fig_params['f2'][2], fontsize=axes_fontsize) ax2.set_ylabel(fig_params['f2'][3], fontsize=axes_fontsize) - ax2.text(0.05, 0.95, REGION_STR[reg], transform=ax2.transAxes, fontsize=12, fontweight="bold", verticalalignment="top") + ax2.text(0.05, 0.95, REGION_STR[reg], transform=ax2.transAxes, fontsize=12, fontweight="bold", + verticalalignment="top") ax2.grid() ax2.set_axisbelow(True) - if reg==0: - ax2_text = ax2.text(s='Prob. of Precip.>'+str(PT)+'mm/h', x=0.5, y=1.05, transform=ax2.transAxes, fontsize=12, ha='center', va='bottom') + if reg == 0: + ax2_text = ax2.text(s='Prob. 
of Precip.>'+str(PT)+'mm/h', x=0.5, y=1.05, + transform=ax2.transAxes, fontsize=12, ha='center', va='bottom') # create figure 3 (normalized PDF) ax3 = fig.add_subplot(NUMBER_OF_REGIONS,4,3+reg*NUMBER_OF_REGIONS) @@ -1174,86 +1195,91 @@ def convecTransBasic_plot(ret,argsv1,argsv2,*argsv3): ax3.set_xticks(fig_params['f3'][4]) ax3.tick_params(labelsize=axes_fontsize) ax3.tick_params(axis="x", pad=xtick_pad) - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): - if t_reg_I[reg,Tidx]: - ax3.scatter(cwv_bin_center,PDF[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX+1): + if t_reg_I[reg, Tidx]: + ax3.scatter(cwv_bin_center,PDF[reg,:,Tidx], + edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], s=marker_size,clip_on=True,zorder=3) - for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN),max(TEMP_MAX_obs+1,TEMP_MAX+1)): - if (OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and \ - P0_obs.size!=0 and t_reg_I_obs[reg,Tidx]): - ax3.scatter(cwv_bin_center_obs,PDF_obs[reg,:,Tidx],\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,\ - facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size/5,clip_on=True,zorder=3) + for Tidx in numpy.arange(min(TEMP_MIN_obs, TEMP_MIN),max(TEMP_MAX_obs+1, TEMP_MAX+1)): + if OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size != 0 and t_reg_I_obs[reg, Tidx]: + ax3.scatter(cwv_bin_center_obs,PDF_obs[reg,:,Tidx], + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size/5, clip_on=True, zorder=3) for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): if t_reg_I[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax3.scatter(Q1[reg,Tidx]/Q0[reg,Tidx],fig_params['f3'][1][1]*0.83,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^") - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax3.scatter(temp_bin_center[Tidx],fig_params['f3'][1][1]*0.83,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax3.scatter(Q1[reg, Tidx]/Q0[reg, Tidx], fig_params['f3'][1][1]*0.83, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], s=marker_size,clip_on=True,zorder=4,marker="^") + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax3.scatter(temp_bin_center[Tidx],fig_params['f3'][1][1]*0.83, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size, clip_on=True, zorder=4, marker="^") ax3.set_xlabel(fig_params['f3'][2], fontsize=axes_fontsize) ax3.set_ylabel(fig_params['f3'][3], fontsize=axes_fontsize) ax3.grid() ax3.set_axisbelow(True) - if reg==0: - ax3_text = ax3.text(s='PDF of CWV', x=0.5, y=1.05, transform=ax3.transAxes, fontsize=12, ha='center', va='bottom') + if reg == 0: + ax3_text = ax3.text(s='PDF of CWV', x=0.5, y=1.05, transform=ax3.transAxes, fontsize=12, + ha='center', va='bottom') # create figure 4 (normalized PDF - precipitation) - ax4 = fig.add_subplot(NUMBER_OF_REGIONS,4,4+reg*NUMBER_OF_REGIONS) + ax4 = fig.add_subplot(NUMBER_OF_REGIONS, 4, 4 + reg*NUMBER_OF_REGIONS) ax4.set_yscale("log") ax4.set_xlim(fig_params['f4'][0]) ax4.set_ylim(fig_params['f4'][1]) ax4.set_xticks(fig_params['f4'][4]) ax4.tick_params(labelsize=axes_fontsize) ax4.tick_params(axis="x", 
pad=xtick_pad) - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): - if t_reg_I[reg,Tidx]: - ax4.scatter(cwv_bin_center,pdf_pe[reg,:,Tidx],\ - edgecolor="none",facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=3) - for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN),max(TEMP_MAX_obs+1,TEMP_MAX+1)): - if (OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and \ - P0_obs.size!=0 and t_reg_I_obs[reg,Tidx]): - ax4.scatter(cwv_bin_center_obs,pdf_pe_obs[reg,:,Tidx],\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,\ - facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size/5,clip_on=True,zorder=3) - for Tidx in numpy.arange(TEMP_MIN,TEMP_MAX+1): + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX+1): + if t_reg_I[reg, Tidx]: + ax4.scatter(cwv_bin_center,pdf_pe[reg, :, Tidx], + edgecolor="none", facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size, clip_on=True, zorder=3) + for Tidx in numpy.arange(min(TEMP_MIN_obs,TEMP_MIN), max(TEMP_MAX_obs+1,TEMP_MAX+1)): + if OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size != 0 and t_reg_I_obs[reg, Tidx]: + ax4.scatter(cwv_bin_center_obs, pdf_pe_obs[reg, :, Tidx], + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size/5, clip_on=True, zorder=3) + for Tidx in numpy.arange(TEMP_MIN, TEMP_MAX + 1): if t_reg_I[reg,Tidx]: - if (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1): - ax4.scatter(Q1[reg,Tidx]/Q0[reg,Tidx],fig_params['f4'][1][1]*0.83,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^") - elif (BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==2): - ax4.scatter(temp_bin_center[Tidx],fig_params['f4'][1][1]*0.83,\ - edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2,facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:],\ - s=marker_size,clip_on=True,zorder=4,marker="^") + if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 1: + ax4.scatter(Q1[reg,Tidx]/Q0[reg,Tidx],fig_params['f4'][1][1]*0.83, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC,:], + s=marker_size, clip_on=True, zorder=4, marker="^") + elif BULK_TROPOSPHERIC_TEMPERATURE_MEASURE == 2: + ax4.scatter(temp_bin_center[Tidx], fig_params['f4'][1][1]*0.83, + edgecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :]/2, + facecolor=scatter_colors[(Tidx-TEMP_MIN_obs)%NoC, :], + s=marker_size, clip_on=True, zorder=4, marker="^") ax4.set_xlabel(fig_params['f4'][2], fontsize=axes_fontsize) ax4.set_ylabel(fig_params['f4'][3], fontsize=axes_fontsize) - ax4.text(0.05, 0.95, "Precip > "+str(PT)+" mm h$^-$$^1$" , transform=ax4.transAxes, fontsize=12, verticalalignment="top") + ax4.text(0.05, 0.95, "Precip > "+str(PT)+" mm h$^-$$^1$", transform=ax4.transAxes, fontsize=12, + verticalalignment="top") ax4.grid() ax4.set_axisbelow(True) - if reg==0: - #ax4_text = ax4.text(s='PDF of CWV for Precip.>'+str(PT)+'mm/h', x=0.49, y=1.05, transform=ax4.transAxes, fontsize=12, ha='center', va='bottom') - ax4.text(s='PDF of CWV for Precip.>'+str(PT)+'mm/h', x=0.49, y=1.05, transform=ax4.transAxes, fontsize=12, ha='center', va='bottom') + if reg == 0: + ax4.text(s='PDF of CWV for Precip.>' + str(PT) + 'mm/h', x=0.49, y=1.05, transform=ax4.transAxes, fontsize=12, + ha='center', va='bottom') fig.text(s=temp_str, x=0, y=0, ha='left', va='top', transform=fig.transFigure, fontsize=12) fig.text(s=triag_qsat_str, x=0, y=-0.02, ha='left', va='top', transform=fig.transFigure, 
fontsize=12) - if (OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size!=0): - fig.text(s='$\circ$: OBS ('+OBS+', '+RES+'$^{\circ}$)', x=0, y=-0.04, ha='left', va='top', transform=fig.transFigure, fontsize=12) + if OVERLAY_OBS_ON_TOP_OF_MODEL_FIG and P0_obs.size != 0: + fig.text(s='$\circ$: OBS (' + OBS + ', ' + RES + '$^{\circ}$)', x=0, y=-0.04, ha='left', va='top', + transform=fig.transFigure, fontsize=12) # set layout to tight (so that space between figures is minimized) fig.tight_layout() - fig.savefig(FIG_OUTPUT_DIR+"/"+FIG_OUTPUT_FILENAME, bbox_inches="tight") + fig.savefig(FIG_OUTPUT_DIR + "/" + FIG_OUTPUT_FILENAME, bbox_inches="tight") print("...Completed!") - print(" Figure saved as "+FIG_OUTPUT_DIR+"/"+FIG_OUTPUT_FILENAME+"!") + print(" Figure saved as " + FIG_OUTPUT_DIR + "/" + FIG_OUTPUT_FILENAME + "!") # ====================================================================== # ======================End Plot MODEL Binned Data====================== # ====================================================================== diff --git a/diagnostics/convective_transition_diag/convecTransCriticalCollapse.py b/diagnostics/convective_transition_diag/convecTransCriticalCollapse.py index 246b39f78..ce66f74c5 100644 --- a/diagnostics/convective_transition_diag/convecTransCriticalCollapse.py +++ b/diagnostics/convective_transition_diag/convecTransCriticalCollapse.py @@ -96,7 +96,7 @@ # Create and read user-specified parameters os.system("python "+os.environ["POD_HOME"]+"/convecTransCriticalCollapse_usp.py") -with open(os.environ["WK_DIR"]+"/convecTransCriticalCollapse_parameters.json") as outfile: +with open(os.environ["WORK_DIR"]+"/convecTransCriticalCollapse_parameters.json") as outfile: params_data=json.load(outfile) print("...Loaded!") diff --git a/diagnostics/convective_transition_diag/convecTransCriticalCollapse_usp.py b/diagnostics/convective_transition_diag/convecTransCriticalCollapse_usp.py index ba0f85a15..5e28f90ac 100644 --- a/diagnostics/convective_transition_diag/convecTransCriticalCollapse_usp.py +++ b/diagnostics/convective_transition_diag/convecTransCriticalCollapse_usp.py @@ -37,7 +37,7 @@ # Directory & Filename for saving binned results (netCDF4) # tave or qsat_int will be appended to BIN_OUTPUT_FILENAME -BIN_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/netCDF" +BIN_OUTPUT_DIR=os.environ["WORK_DIR"]+"/model/netCDF" BIN_OUTPUT_FILENAME=os.environ["CASENAME"]+".convecTransBasic" if BULK_TROPOSPHERIC_TEMPERATURE_MEASURE==1: @@ -55,7 +55,7 @@ # Directory & Filename for saving figures # convecTransCriticalCollapse.py generates 2 sets of figures for MODEL -FIG_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/PS" +FIG_OUTPUT_DIR=os.environ["WORK_DIR"] + "/model/PS" # Figure filename for Convective Transition Statistics (CTS) # collapsed by shifting CWV by Critical CWV FIG_FILENAME_CTS=os.environ["CASENAME"]+".convecTransCriticalCollapse_stats"+"_"+TEMP_VAR+".eps" @@ -69,20 +69,20 @@ +RES+"_fillNrCWV_"\ +TEMP_VAR+".nc")) # convecTransCriticalCollapse.py generates 2 sets of figures for OBS too -FIG_OBS_DIR=os.environ["WK_DIR"]+"/obs/PS" +FIG_OBS_DIR=os.environ["WORK_DIR"] + "/obs/PS" FIG_OBS_FILENAME_CTS="convecTransCriticalCollapse_stats_R2TMIv7r1_200206_201405_res="\ +RES+"_fillNrCWV_"+TEMP_VAR+".eps" FIG_OBS_FILENAME_WC="convecTransCriticalCollapse_wc_R2TMIv7r1_200206_201405_res="\ +RES+"_fillNrCWV_"+TEMP_VAR+".eps" # Don't fit/plot bins with PDF<PDF_THRESHOLD - pdf_gt_th=numpy.zeros(PDF.shape) + pdf_gt_th = numpy.zeros(PDF.shape) with numpy.errstate(invalid="ignore"): - pdf_gt_th[PDF>PDF_THRESHOLD]=1 -
P[pdf_gt_th==0]=numpy.nan - CP[pdf_gt_th==0]=numpy.nan - PDF=numpy.copy(PDF) - PDF_pe=PDF*CP + pdf_gt_th[PDF > PDF_THRESHOLD] = 1 + P[pdf_gt_th == 0]=numpy.nan + CP[pdf_gt_th == 0]=numpy.nan + PDF = numpy.copy(PDF) + PDF_pe = PDF*CP # Indicator of (temp,reg) with wide CWV range # & other criteria specified below # i.e., t_reg_I will be further modified below - t_reg_I=(numpy.squeeze(numpy.sum(pdf_gt_th,axis=1))*CWV_BIN_WIDTH>CWV_RANGE_THRESHOLD) + t_reg_I = (numpy.squeeze(numpy.sum(pdf_gt_th, axis=1))*CWV_BIN_WIDTH > CWV_RANGE_THRESHOLD) - ### Connected Component Section + # Connected Component Section # The CWV_RANGE_THRESHOLD-Criterion must be satisfied by a connected component - # Default: off for MODEL/on for OBS/on for Fitting + # Default: off for MODEL/on for OBS/on for Fitting # For R2TMIv7 (OBS) this doesn't make much difference - # But when models behave "funny" one may miss by turning on this section + # But when models behave "funny" one may miss by turning on this section # For fitting procedure (finding critical CWV at which the precip picks up) - # Default: on + # Default: on for reg in numpy.arange(P0.shape[0]): for Tidx in numpy.arange(P0.shape[2]): if t_reg_I[reg, Tidx]: @@ -167,8 +167,8 @@ def convecTransCriticalCollapse_fitCritical(argsv1,*argsv2): else: t_reg_I[reg,Tidx]=False pdf_gt_th[reg,:,Tidx]=0 - ### End of Connected Component Section - + # End of Connected Component Section + # # Copy P, CP into p, cp for (temp,reg) with "wide CWV range" & "large PDF" p=numpy.zeros(P.shape) cp=numpy.zeros(P.shape) @@ -180,7 +180,7 @@ def convecTransCriticalCollapse_fitCritical(argsv1,*argsv2): p[pdf_gt_th==0]=numpy.nan cp[pdf_gt_th==0]=numpy.nan - # Disgard (temp,reg) if conditional probability < CP_THRESHOLD + # Discard (temp,reg) if conditional probability < CP_THRESHOLD for reg in numpy.arange(P.shape[0]): for Tidx in numpy.arange(P.shape[2]): if t_reg_I[reg,Tidx] and cp[reg,:,Tidx][cp[reg,:,Tidx]>=0.0].size>0: @@ -195,29 +195,29 @@ def convecTransCriticalCollapse_fitCritical(argsv1,*argsv2): for Tidx in numpy.arange(t_reg_I.shape[1]): if t_reg_I[reg,Tidx]: p_gt_pref=p[reg,:,Tidx]>PRECIP_REF - if numpy.nonzero(p_gt_pref)[0].size>0: # p_gt_pref non-empty + if numpy.nonzero(p_gt_pref)[0].size > 0: # p_gt_pref non-empty wr_idx=numpy.nonzero(p_gt_pref)[0][0] - wr_idx-=(p[reg,wr_idx,Tidx]-PRECIP_REF)/(p[reg,wr_idx,Tidx]-p[reg,wr_idx-1,Tidx]) + wr_idx -= (p[reg, wr_idx, Tidx]-PRECIP_REF)/(p[reg,wr_idx,Tidx]-p[reg,wr_idx-1,Tidx]) wr[reg,Tidx]=(wr_idx+1)*CWV_BIN_WIDTH - else: # p1<PRECIP_REF for all CWV - t_reg_I[reg,Tidx]=False - TEMP_MIN=numpy.where(numpy.sum(t_reg_I,axis=0)>=1)[0][0] - TEMP_MAX=numpy.where(numpy.sum(t_reg_I,axis=0)>=1)[0][-1] + TEMP_MIN = numpy.where(numpy.sum(t_reg_I, axis=0) >= 1)[0][0] + TEMP_MAX = numpy.where(numpy.sum(t_reg_I, axis=0) >= 1)[0][-1] # Start fitting to find Critical CWV (wc) # Working with the assumption that the slope of the asymptote # to the pickup curves do not depend on temperature (as in OBS) - wc=numpy.zeros(t_reg_I.shape) # Find wc-wr first, then wc=wr-(wr-wc) - al=numpy.zeros(t_reg_I.shape[0]) # al:alpha, slope of pickup asymptote - cwvRange=numpy.linspace(CWV_FIT_RANGE_MIN, - CWV_FIT_RANGE_MAX, - int((CWV_FIT_RANGE_MAX-CWV_FIT_RANGE_MIN)/CWV_BIN_WIDTH+1)) + wc = numpy.zeros(t_reg_I.shape) # Find wc-wr first, then wc=wr-(wr-wc) + al = numpy.zeros(t_reg_I.shape[0]) # al:alpha, slope of pickup asymptote + cwvRange = numpy.linspace(CWV_FIT_RANGE_MIN, + CWV_FIT_RANGE_MAX, + int((CWV_FIT_RANGE_MAX-CWV_FIT_RANGE_MIN)/CWV_BIN_WIDTH + 1)) # Use the 3 most probable Temperature bins only # These should best capture the pickup over tropical
oceans @@ -225,15 +225,15 @@ def convecTransCriticalCollapse_fitCritical(argsv1,*argsv2): for reg in numpy.arange(t_reg_I.shape[0]): with warnings.catch_warnings(): warnings.simplefilter("ignore") - mpdf=numpy.nansum(PDF[reg,:,:],axis=0) # marginal PDF - mp3t=sorted(range(len(mpdf)), key=lambda k: mpdf[k])[-3:] + mpdf = numpy.nansum(PDF[reg, :, :], axis=0) # marginal PDF + mp3t = sorted(range(len(mpdf)), key=lambda k: mpdf[k])[-3:] with warnings.catch_warnings(): warnings.simplefilter("ignore") - p_mp3t=numpy.nanmean(numpy.array([\ - interp1d(cwv_bin_center-wr[reg,mp3t[0]],p[reg,:,mp3t[0]],'linear', 0, True, False) (cwvRange), - interp1d(cwv_bin_center-wr[reg,mp3t[1]],p[reg,:,mp3t[1]],'linear', 0, True, False) (cwvRange), - interp1d(cwv_bin_center-wr[reg,mp3t[2]],p[reg,:,mp3t[2]],'linear', 0, True, False) (cwvRange) - ]),axis=0) + p_mp3t=numpy.nanmean(numpy.array([ + interp1d(cwv_bin_center-wr[reg, mp3t[0]], p[reg, :, mp3t[0]], 'linear', 0, True, False)(cwvRange), + interp1d(cwv_bin_center-wr[reg, mp3t[1]], p[reg, :, mp3t[1]], 'linear', 0, True, False)(cwvRange), + interp1d(cwv_bin_center-wr[reg, mp3t[2]], p[reg, :, mp3t[2]], 'linear', 0, True, False)(cwvRange) + ]), axis=0) with warnings.catch_warnings(): warnings.simplefilter("ignore") fitRange=((p_mp3t>PRECIP_FIT_MIN)*(p_mp3t<PRECIP_FIT_MAX))
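The wr_idx adjustment in the hunk above locates the reference crossing by linear interpolation: wr_idx starts at the first CWV bin where the binned precipitation exceeds PRECIP_REF, then is pulled back by the fraction of the bin spanned between that bin and the one below it. A standalone sketch of the same step, with a hypothetical helper name and a 1-D input (the POD applies this formula per region and temperature bin):

    import numpy as np

    def reference_crossing(p, precip_ref, cwv_bin_width):
        # p: binned precipitation vs. CWV (1-D, may contain NaNs)
        above = np.nonzero(p > precip_ref)[0]
        if above.size == 0:
            return np.nan  # the pickup never reaches the reference value
        i = above[0]  # first bin above the reference (assumed i > 0, as in the POD)
        # linear interpolation between bins i-1 and i gives the exact crossing
        frac = (p[i] - precip_ref) / (p[i] - p[i - 1])
        return (i - frac + 1) * cwv_bin_width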

diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track.html b/diagnostics/eulerian_storm_track/eulerian_storm_track.html
+ Eulerian Storm Track
Eulerian Storm Track Diagnostic

-By filtering atmospheric data temporally, in a manner that removes the diurnal and the greater than weekly variability, one can isolate the synoptic variability (Blackmon et al. 1976). Then, the standard deviation of the filtered data at each latitude and longitude can be interpreted as the climatological baroclinic wave activity, which, for historical reasons, is termed storm tracks (Wallace et al. 1988). The storm tracks give a simple large-scale metric for the skill in the model representation of extratropical cyclones, in terms of location of the storms, their seasonality and their intensity, which correlates very strongly with transient poleward energy transport. + By filtering atmospheric data temporally, in a manner that removes the diurnal and the greater than weekly variability, + one can isolate the synoptic variability (Blackmon et al. 1976). Then, the standard deviation of the filtered data at + each latitude and longitude can be interpreted as the climatological baroclinic wave activity, which, + for historical reasons, is termed storm tracks (Wallace et al. 1988). The storm tracks give a simple + large-scale metric for the skill in the model representation of extratropical cyclones, in terms of location of + the storms, their seasonality and their intensity, which correlates very strongly with transient poleward + energy transport. -To isolate the synoptic timescale, this algorithm uses 24-hour differences of daily-averaged data. Using daily averages removes the diurnal cycle and the 24-hour differencing removes variability beyond 5 days (Wallace et al. 1988). After filtering the data to create anomalies, the variance of the anomalies is calculated across the four seasons for each year. Then the seasonal variances are averaged across all years. For the first year in the sequence, the variance for JF is calculated and treated as the first DJF instance. For the final December in the sequence is not used in the calculation. + To isolate the synoptic timescale, this algorithm uses 24-hour differences of daily-averaged data. + Using daily averages removes the diurnal cycle and the 24-hour differencing removes variability beyond 5 days + (Wallace et al. 1988). After filtering the data to create anomalies, the variance of the anomalies is calculated + across the four seasons for each year. Then the seasonal variances are averaged across all years. + For the first year in the sequence, the variance for JF is calculated and treated as the first DJF instance. + The final December in the sequence is not used in the calculation.
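The filtering recipe described above reduces to a few lines of numpy. A minimal sketch (hypothetical synoptic_std_dev helper and array shapes; the POD's actual implementation lives in eulerian_storm_track_util.py, and for brevity this pools all days of one season rather than averaging per-year variances as the text specifies):

    import numpy as np

    def synoptic_std_dev(daily_v850, month):
        # daily_v850: (time, lat, lon) daily means; month: (time,) calendar month of each day
        # 24-hour differencing, X(t+1) - X(t); the last day has no successor, so NaN
        diff = np.full(daily_v850.shape, np.nan)
        diff[:-1] = daily_v850[1:] - daily_v850[:-1]
        # seasonal mask, e.g. DJF
        in_season = np.isin(month, [12, 1, 2])
        # the standard deviation of the filtered anomalies is the storm-track metric
        return np.nanstd(diff[in_season], axis=0)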

Full Documentation and Contact Information

diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track.py b/diagnostics/eulerian_storm_track/eulerian_storm_track.py index 81650eda8..6d64c730e 100755 --- a/diagnostics/eulerian_storm_track/eulerian_storm_track.py +++ b/diagnostics/eulerian_storm_track/eulerian_storm_track.py @@ -1,193 +1,222 @@ # Code created by Jeyavinoth Jeyaratnam, to be implemented in MDTF # Import standard Python packages -import numpy as np +import numpy as np from netCDF4 import Dataset import os import glob # Import my code from the current folder import eulerian_storm_track_util as est -import plotter # do not need this, just debugging purpose +import plotter # not needed, just for debugging purposes print("****************************************************************************************") print("Started Execution of Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py)!") print("****************************************************************************************") # Setting up the necessary variable names -os.environ['v850_file'] = '*.'+os.environ['v850_var']+'.day.nc' +os.environ['v850_file'] = '*.' + os.environ['v850_var'] + '.day.nc' # Model output filename convention -os.environ['MODEL_OUTPUT_DIR'] = os.environ['DATADIR']+'/day' +os.environ['MODEL_OUTPUT_DIR'] = os.environ['DATADIR'] + '/day' missing_file = 0 -if (len(glob.glob(os.environ['MODEL_OUTPUT_DIR']+'/'+os.environ['v850_file']))==0): - print('Required V850 file missing!') - missing_file = 1 +if len(glob.glob(os.environ['MODEL_OUTPUT_DIR'] + '/' + os.environ['v850_file'])) == 0: + print('Required V850 file missing!') + missing_file = 1 -if (missing_file == 1): - print('MISSING FILES: Eulerian Strom Tracker will NOT be executed!') +if missing_file == 1: + print('MISSING FILES: Eulerian Storm Tracker will NOT be executed!') else: - ########################################################## - # Create the necessary directories - ########################################################## - - if not os.path.exists(os.environ['WK_DIR']+'/model'): - os.makedirs(os.environ['WK_DIR']+'/model') - if not os.path.exists(os.environ['WK_DIR']+'/model/netCDF'): - os.makedirs(os.environ['WK_DIR']+'/model/netCDF') - if not os.path.exists(os.environ['WK_DIR']+'/model/PS'): - os.makedirs(os.environ['WK_DIR']+'/model/PS') - if not os.path.exists(os.environ['WK_DIR']+'/obs'): - os.makedirs(os.environ['WK_DIR']+'/obs') - if not os.path.exists(os.environ['WK_DIR']+'/obs/netCDF'): - os.makedirs(os.environ['WK_DIR']+'/obs/netCDF') - if not os.path.exists(os.environ['WK_DIR']+'/obs/PS'): - os.makedirs(os.environ['WK_DIR']+'/obs/PS') - - ################################################################## - # Reading in the necessary data, and computing the daily eddies - ################################################################## - - netcdf_filename = os.environ['MODEL_OUTPUT_DIR']+'/'+os.environ['CASENAME']+'.'+os.environ['v850_var']+'.day.nc' - if (not os.path.exists(netcdf_filename)): - print ('Cannot Find File: ', netcdf_filename) - - # temporarily add the lat_var and lon_var - # since these values seem to be missing - os.environ['lat_var'] = 'lat' - os.environ['lon_var'] = 'lon' - os.environ['time_var'] = 'time' - - # reading in the model data - ncid = Dataset(netcdf_filename, 'r') - lat = ncid.variables[os.environ['lat_var']][:] - lat.fill_value = np.nan - lat = lat.filled() - lon = ncid.variables[os.environ['lon_var']][:] - lon.fill_value = np.nan - lon = lon.filled() - time = ncid.variables[os.environ['time_var']][:] -
time.fill_value = np.nan - time = time.filled() - v850 = ncid.variables[os.environ['v850_var']][:] - v850.fill_value = np.nan - v850 = v850.filled() - ncid.close() - - # creating the lat and lon in grid format - lonGrid, latGrid = np.meshgrid(lon, lat) - - # getting the daily difference X(t+1) - X(t) - eddies = est.transient_eddies(v850) - - ########################################################## - # Creating the plot for the different seasons - ########################################################## - - print('*** Processing Model Data...') - model_zonal_means = {} - model_zonal_means['lat'] = lat - - season = 'djf' - print('*** Processing Season: %s'%(season.upper())) - model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season) - out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper()) - plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max') - - season = 'mam' - print('*** Processing Season: %s'%(season.upper())) - model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season) - out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper()) - plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max') - - season = 'jja' - print('*** Processing Season: %s'%(season.upper())) - model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season) - out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper()) - plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max') - - season = 'son' - print('*** Processing Season: %s'%(season.upper())) - model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['FIRSTYR']), time, season=season) - out_file = os.environ['WK_DIR']+'/model/%s.%s.png'%(os.environ['CASENAME'], season.upper()) - plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, title='%s (%s to %s)'%(season.upper(), os.environ['FIRSTYR'], os.environ['LASTYR']), levels=np.arange(0,6), extend='max') - - - #### OBS data ### - print('*** Processing Observations: ERA-Interim') - obs_data_file = os.environ['OBS_DATA'] + '/erai.nc' - obs_topo_file = os.environ['OBS_DATA'] + '/erai_topo.nc' - obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, erai_zonal_means = est.obs_std_dev(obs_data_file, obs_topo_file) - - obs_max_lim = 6 - - print('*** Processing Season: DJF') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'DJF') - plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, title='%s (%d to %d)'%('DJF', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: MAM') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'MAM') - plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, title='%s (%d to %d)'%('MAM', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: JJA') - out_file = 
os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'JJA') - plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, title='%s (%d to %d)'%('JJA', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: SON') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.erai.png'%(os.environ['CASENAME'], 'SON') - plotter.plot(obs_lon, obs_lat, son, out_file=out_file, title='%s (%d to %d)'%('SON', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Observations: ERA-5') - obs_data_file = os.environ['OBS_DATA'] + '/era5.nc' - obs_topo_file = os.environ['OBS_DATA'] + '/era5_topo.nc' - obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, era5_zonal_means = est.obs_std_dev(obs_data_file, obs_topo_file) - - obs_max_lim = 6 - - print('*** Processing Season: DJF') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'DJF') - plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, title='%s (%d to %d)'%('DJF', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: MAM') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'MAM') - plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, title='%s (%d to %d)'%('MAM', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: JJA') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'JJA') - plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, title='%s (%d to %d)'%('JJA', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - print('*** Processing Season: SON') - out_file = os.environ['WK_DIR']+'/obs/%s.%s.era5.png'%(os.environ['CASENAME'], 'SON') - plotter.plot(obs_lon, obs_lat, son, out_file=out_file, title='%s (%d to %d)'%('SON', obs_start_year, obs_end_year), levels=np.arange(0,obs_max_lim), extend='max') - - - ########################################################## - #### Plotting Zonal Means for all the different seasons - ########################################################## - print('*** Plotting Zonal Means Image') - out_file = os.environ['WK_DIR']+'/%s.zonal_means.png'%(os.environ['CASENAME']) - plotter.plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file) - - ########################################################## - # Editting HTML Template for the current CASENAME - ########################################################## - - print('*** Editting Templates...') - # Copy template html (and delete old html if necessary) - if os.path.isfile( os.environ["WK_DIR"]+"/eulerian_storm_track.html" ): - os.system("rm -f "+os.environ["WK_DIR"]+"/eulerian_storm_track.html") - - cmd = "cp "+os.environ["POD_HOME"]+"/eulerian_storm_track.html "+os.environ["WK_DIR"]+"/" - os.system(cmd) - cmd = "cp "+os.environ["POD_HOME"]+"/doc/MDTF_Documentation_eulerian_storm_track.pdf "+os.environ["WK_DIR"]+"/" - os.system(cmd) - - # ====================================================================== - # End of HTML sections - # ====================================================================== - - print("*****************************************************************************") - print("Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py) Executed!") - print("*****************************************************************************") - + 
########################################################## + # Create the necessary directories + ########################################################## + + if not os.path.exists(os.environ['WORK_DIR'] + '/model'): + os.makedirs(os.environ['WORK_DIR'] + '/model') + if not os.path.exists(os.environ['WORK_DIR'] + '/model/netCDF'): + os.makedirs(os.environ['WORK_DIR'] + '/model/netCDF') + if not os.path.exists(os.environ['WORK_DIR'] + '/model/PS'): + os.makedirs(os.environ['WORK_DIR'] + '/model/PS') + if not os.path.exists(os.environ['WORK_DIR'] + '/obs'): + os.makedirs(os.environ['WORK_DIR'] + '/obs') + if not os.path.exists(os.environ['WORK_DIR'] + '/obs/netCDF'): + os.makedirs(os.environ['WORK_DIR'] + '/obs/netCDF') + if not os.path.exists(os.environ['WORK_DIR'] + '/obs/PS'): + os.makedirs(os.environ['WORK_DIR'] + '/obs/PS') + + ################################################################## + # Reading in the necessary data, and computing the daily eddies + ################################################################## + + netcdf_filename = os.environ['MODEL_OUTPUT_DIR'] + '/' + os.environ['CASENAME'] + '.' + os.environ[ + 'v850_var'] + '.day.nc' + if not os.path.exists(netcdf_filename): + print('Cannot Find File: ', netcdf_filename) + + # temporarily add the lat_var and lon_var + # since these values seem to be missing + os.environ['lat_var'] = 'lat' + os.environ['lon_var'] = 'lon' + os.environ['time_var'] = 'time' + + # reading in the model data + ncid = Dataset(netcdf_filename, 'r') + lat = ncid.variables[os.environ['lat_var']][:] + lat.fill_value = np.nan + lat = lat.filled() + lon = ncid.variables[os.environ['lon_var']][:] + lon.fill_value = np.nan + lon = lon.filled() + time = ncid.variables[os.environ['time_var']][:] + time.fill_value = np.nan + time = time.filled() + v850 = ncid.variables[os.environ['v850_var']][:] + v850.fill_value = np.nan + v850 = v850.filled() + ncid.close() + + # creating the lat and lon in grid format + lonGrid, latGrid = np.meshgrid(lon, lat) + + # getting the daily difference X(t+1) - X(t) + eddies = est.transient_eddies(v850) + + ########################################################## + # Creating the plot for the different seasons + ########################################################## + + print('*** Processing Model Data...') + model_zonal_means = {} + model_zonal_means['lat'] = lat + + season = 'djf' + print('*** Processing Season: %s' % (season.upper())) + model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time, + season=season) + out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper()) + plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, + title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']), + levels=np.arange(0, 6), extend='max') + + season = 'mam' + print('*** Processing Season: %s' % (season.upper())) + model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time, + season=season) + out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper()) + plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, + title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']), + levels=np.arange(0, 6), extend='max') + + season = 'jja' + print('*** Processing Season: %s' % (season.upper())) + model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), 
time, + season=season) + out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper()) + plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, + title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']), + levels=np.arange(0, 6), extend='max') + + season = 'son' + print('*** Processing Season: %s' % (season.upper())) + model_std_dev, model_zonal_means[season] = est.model_std_dev(eddies, int(os.environ['startdate']), time, + season=season) + out_file = os.environ['WORK_DIR'] + '/model/%s.%s.png' % (os.environ['CASENAME'], season.upper()) + plotter.plot(lonGrid, latGrid, model_std_dev, out_file=out_file, + title='%s (%s to %s)' % (season.upper(), os.environ['startdate'], os.environ['enddate']), + levels=np.arange(0, 6), extend='max') + + # OBS data # + print('*** Processing Observations: ERA-Interim') + obs_data_file = os.environ['OBS_DATA'] + '/erai.nc' + obs_topo_file = os.environ['OBS_DATA'] + '/erai_topo.nc' + obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, erai_zonal_means = est.obs_std_dev( + obs_data_file, obs_topo_file) + + obs_max_lim = 6 + + print('*** Processing Season: DJF') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'DJF') + plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, + title='%s (%d to %d)' % ('DJF', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: MAM') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'MAM') + plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, + title='%s (%d to %d)' % ('MAM', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: JJA') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'JJA') + plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, + title='%s (%d to %d)' % ('JJA', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: SON') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.erai.png' % (os.environ['CASENAME'], 'SON') + plotter.plot(obs_lon, obs_lat, son, out_file=out_file, + title='%s (%d to %d)' % ('SON', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Observations: ERA-5') + obs_data_file = os.environ['OBS_DATA'] + '/era5.nc' + obs_topo_file = os.environ['OBS_DATA'] + '/era5_topo.nc' + obs_lat, obs_lon, djf, mam, jja, son, obs_start_year, obs_end_year, era5_zonal_means = est.obs_std_dev( + obs_data_file, obs_topo_file) + + obs_max_lim = 6 + + print('*** Processing Season: DJF') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'DJF') + plotter.plot(obs_lon, obs_lat, djf, out_file=out_file, + title='%s (%d to %d)' % ('DJF', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: MAM') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'MAM') + plotter.plot(obs_lon, obs_lat, mam, out_file=out_file, + title='%s (%d to %d)' % ('MAM', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: JJA') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'JJA') + plotter.plot(obs_lon, obs_lat, jja, out_file=out_file, + title='%s (%d to %d)' % ('JJA', 
obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + print('*** Processing Season: SON') + out_file = os.environ['WORK_DIR'] + '/obs/%s.%s.era5.png' % (os.environ['CASENAME'], 'SON') + plotter.plot(obs_lon, obs_lat, son, out_file=out_file, + title='%s (%d to %d)' % ('SON', obs_start_year, obs_end_year), levels=np.arange(0, obs_max_lim), + extend='max') + + ########################################################## + # Plotting Zonal Means for all the different seasons + ########################################################## + print('*** Plotting Zonal Means Image') + out_file = os.environ['WORK_DIR'] + '/%s.zonal_means.png' % (os.environ['CASENAME']) + plotter.plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file) + + ########################################################## + # Editing HTML Template for the current CASENAME + ########################################################## + + print('*** Editing Templates...') + # Copy template html (and delete old html if necessary) + if os.path.isfile(os.environ["WORK_DIR"] + "/eulerian_storm_track.html"): + os.system("rm -f " + os.environ["WORK_DIR"] + "/eulerian_storm_track.html") + + cmd = "cp " + os.environ["POD_HOME"] + "/eulerian_storm_track.html " + os.environ["WORK_DIR"] + "/" + os.system(cmd) + cmd = "cp " + os.environ["POD_HOME"] + "/doc/MDTF_Documentation_eulerian_storm_track.pdf " + os.environ[ + "WORK_DIR"] + "/" + os.system(cmd) + + # ====================================================================== + # End of HTML sections + # ====================================================================== + + print("*****************************************************************************") + print("Eulerian Storm Track Diagnostic Package (eulerian_storm_track.py) Executed!") + print("*****************************************************************************") diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py b/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py index 098750838..1a6876309 100755 --- a/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py +++ b/diagnostics/eulerian_storm_track/eulerian_storm_track_functions.py @@ -1,10 +1,10 @@ #!/usr/bin/env python -############# EULERIAN STROM TRACKER ############ -############# Necessary Functions ############### -###### Created by: Jeyavinoth Jeyaratnam #### -###### Created Date: 03/29/2019 #### -###### Last Modified: 01/17/2020 #### +# EULERIAN STORM TRACKER ############ +# Necessary Functions ############### +# Created by: Jeyavinoth Jeyaratnam #### +# Created Date: 03/29/2019 #### +# Last Modified: 01/17/2020 #### ################################################# # Importing standard libraries @@ -16,149 +16,165 @@ 1) Average 6hrly to daily 2) take x(t+1) - x(t) 3) for each year, for season, get std_dev - 4) avergae std_dev for all years + 4) average std_dev for all years ''' -def six_hrly_to_daily(data, start_year, time): - ''' - Data has to be provided as six hourly timesteps, in a numpy array format (time x lon x lat), lon and lat can be changed, but keep track of it - the time variable has to be given in six hourly increments, since the start_year [0, 6, 12, 18, 24, 30, 36, 42, 48] - where start_year is the starting year of the given data - Output: - numpy array in the format time x lon x lat (lon, lat depends on your input) - output time dimension size will be the number of days provided in the time array - ''' - # convert time to numpy array - time
-def six_hrly_to_daily(data, start_year, time):
-    '''
-    Data has to be provided as six hourly timesteps, in a numpy array format (time x lon x lat), lon and lat can be changed, but keep track of it
-    the time variable has to be given in six hourly increments, since the start_year [0, 6, 12, 18, 24, 30, 36, 42, 48]
-    where start_year is the starting year of the given data
-    Output:
-    numpy array in the format time x lon x lat (lon, lat depends on your input)
-    output time dimension size will be the number of days provided in the time array
-    '''
-    # convert time to numpy array
-    time = np.asarray(time)
+def six_hrly_to_daily(data, time):
+    """
+    Data has to be provided as six hourly timesteps, in a numpy array format (time x lon x lat), lon and lat can be
+    changed, but keep track of it
+    the time variable has to be given in six hourly increments since the start of the data
+    [0, 6, 12, 18, 24, 30, 36, 42, 48]

-    # check if time array and data time dimension is the same
-    if (len(time) != data.shape[0]):
-        raise Exception ("Time dimensions don't match!")
+    Output:
+    numpy array in the format time x lon x lat (lon, lat depends on your input)
+    output time dimension size will be the number of days provided in the time array
+    """
+    # convert time to numpy array
+    time = np.asarray(time)

-    # converting six hrly timesteps into the days
-    time_in_days = (time//24) + 1
-
-    min_time = min(time_in_days)
-    max_time = max(time_in_days)
-    time_range = range(min_time, max_time+1)
+    # check if time array and data time dimension is the same
+    if len(time) != data.shape[0]:
+        raise Exception("Time dimensions don't match!")

-    out_time = np.empty((len(time_range),))*np.nan
-    out_data = np.empty((len(time_range), data.shape[1], data.shape[2]))*np.nan
+    # converting six hrly timesteps into the days
+    time_in_days = (time//24) + 1

-    # looping through the days and creating the output array
-    for ind, day in enumerate(time_range):
-        out_data[ind, :, :] = np.nansum(data[time_in_days == day, :, :], axis=0)
-        out_time[ind] = day
+    min_time = min(time_in_days)
+    max_time = max(time_in_days)
+    time_range = range(min_time, max_time+1)
+
+    out_time = np.empty((len(time_range),))*np.nan
+    out_data = np.empty((len(time_range), data.shape[1], data.shape[2]))*np.nan
+
+    # looping through the days and creating the output array
+    for ind, day in enumerate(time_range):
+        out_data[ind, :, :] = np.nansum(data[time_in_days == day, :, :], axis=0)
+        out_time[ind] = day
+
+    return out_data, out_time

-    return out_data, out_time
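For orientation, this is how the refactored six_hrly_to_daily would typically be driven (hypothetical shapes and import path; note that with np.nansum each output day is the sum of its four six-hourly steps, not their mean):

```python
import numpy as np
from eulerian_storm_track_functions import six_hrly_to_daily  # assumed import path

# two days of six-hourly fields on a 5 x 8 grid (illustrative values)
data = np.random.rand(8, 5, 8)
time = np.array([0, 6, 12, 18, 24, 30, 36, 42])  # hours since the start of the data

daily, days = six_hrly_to_daily(data, time)
print(daily.shape, days)  # (2, 5, 8) [1. 2.]
```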
 def daily_diff(daily_data):
-    '''
-    Data has to be provided as daily_data
-    it will compute the difference between the current day and the previous day
-    i.e. X(t+1) - X(t), nans for the last index
-    '''
-    # pad the right of the array with nan values along the first dimension
-    # then extract the values from the 2nd column (index = 1) to the end
-    # this will give us a shifted array of daily data, i.e. X(t+1)
-    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
+    """
+    Data has to be provided as daily_data
+    it will compute the difference between the current day and the previous day
+    i.e. X(t+1) - X(t), nans for the last index
+    """
+    # pad the right of the array with nan values along the first dimension
+    # then extract the values from the 2nd column (index = 1) to the end
+    # this will give us a shifted array of daily data, i.e. X(t+1)
+    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]

-    return daily_data_shift - daily_data # X(t+1) - X(t), with nan values for the last time dimension
+    return daily_data_shift - daily_data  # X(t+1) - X(t), with nan values for the last time dimension

 def std_dev(data, time_ind):
-    '''
-    Given data input in the format (time, lat, lon)
-    we will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension
-    '''
-    out_std_dev = np.empty((data.shape[1], data.shape[2]))*np.nan
-
-    # check if any value is true for the selected time, if so then return nan values, else compute standard deviation
-    if np.all(np.invert(time_ind)):
-        print ('No time index selected!')
-        return (out_std_dev)
-    else:
-        return np.nanstd(data[time_ind, :, :], axis=0)
-
-def get_time_ind(start_year, time, season='djf'):
-    ''' Get the time index for the given season '''
-
-    # convert time as numpy array
-    time = np.asarray(time)
-
-    # getting the datetime values for the time index
-    dates_month=[]
-    dates_year=[]
-    for i_time in time:
-        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1)
-        dates_month.append(temp_time.month)
-        dates_year.append(temp_time.year)
-
-    dates_month = np.asarray(dates_month)
-
-    # getting the time index
-    if (season == ''):
-        raise Exception('Set which season you want to extract!')
-    elif (season == 'djf'):
-        time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
-    elif (season == 'mam'):
-        time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
-    elif (season == 'jja'):
-        time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
-    elif (season == 'son'):
-        time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
-
-    return time_ind
-
-def old_std_dev(data, start_year, time, time_period='yearly', season=''):
-    '''
-    Data input has to be daily in the format (time, lat, lon)
-    start_year has to be the start year of the given array
-    if an incomplete data array along the time dimension is provided, then you have to specify the time variable
-    time vaiable has to be specified in days, since start_year [1,2,3,4,5,6,7], default=finds the time starting from day 1
-    time_period includes 'yearly', 'seasonally', 'all', default='all' means avarage of all years
-    if 'byseason' then have to set season variable to be: djf', 'mam', 'jja', 'son'
-
-    Output:
-    returns standard_deviation for the given time_period, and the time array that corresponds to the std_dev output
-    out_time is zero for time_period='all'
-    '''
-    # convert time as numpy array
-    time = np.asarray(time)
-
-    # getting the datetime values for the time index
-    dates_month=[]
-    dates_year=[]
-    for i_time in time:
-        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1)
-        dates_month.append(temp_time.month)
-        dates_year.append(temp_time.year)
-
-    uni_year = sorted(set(dates_year))
-    dates_month = np.asarray(dates_month)
-    dates_year = np.asarray(dates_year)
-
-    # getting the time_ind
-    if (time_period == 'all'):
-        return np.nanstd(data, axis=0), 0
-    else:
+    """
+    Given data input in the format (time, lat, lon)
+    we will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the
+    size of the time dimension
+    """
+    out_std_dev = np.empty((data.shape[1], data.shape[2]))*np.nan
+
+    # if no timestep is selected, return the all-NaN array, else compute the standard deviation
+    if np.all(np.invert(time_ind)):
+        print('No time index selected!')
+        return out_std_dev
+    else:
+        return np.nanstd(data[time_ind, :, :], axis=0)
+
+
+def get_time_ind(start_year, time, season: str = 'djf'):
+    """
+    Args:
+        start_year: starting year of the given data
+        time: time in days since start_year
+        season: one of 'djf', 'mam', 'jja' or 'son'
+
+    Returns:
+        The time index for the given season
+    """
+
+    # convert time as numpy array
+    time = np.asarray(time)
+
+    # getting the datetime values for the time index
+    dates_month = []
+    dates_year = []
+    for i_time in time:
+        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1)
+        dates_month.append(temp_time.month)
+        dates_year.append(temp_time.year)
+
+    dates_month = np.asarray(dates_month)
+
     # getting the time index
-    if (time_period == 'yearly'):
-        time_ind = (dates_month > 0)
-    elif (time_period == 'seasonally'):
-        if (season == ''):
+    if season == '':
         raise Exception('Set which season you want to extract!')
-        elif (season == 'djf'):
+    elif season == 'djf':
         time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
-        elif (season == 'mam'):
+    elif season == 'mam':
         time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
-        elif (season == 'jja'):
+    elif season == 'jja':
         time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
-        elif (season == 'son'):
+    elif season == 'son':
         time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
-    else:
-        raise Exception('Error in the time_period set!')
+
+    return time_ind
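Worth noting while reading get_time_ind: the DJF mask groups December with the January/February of the same calendar year, which is why model_std_dev in eulerian_storm_track_util.py matches month 12 against i_year-1 instead. A quick check of the mask arithmetic (start year chosen arbitrarily):

```python
import datetime as dt
import numpy as np

start_year = 2001  # illustrative non-leap year
time = np.arange(1, 366)  # days since the start of start_year, as get_time_ind expects

months = np.asarray([(dt.datetime(start_year, 1, 1)
                      + dt.timedelta(days=float(t) - 1)).month for t in time])
djf = (months == 12) | (months == 1) | (months == 2)
print(djf.sum())  # 90 = Jan (31) + Feb (28) + Dec (31)
```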
+
+
+def old_std_dev(data, start_year, time, time_period: str = 'yearly', season: str = ''):
+    """
+    Data input has to be daily in the format (time, lat, lon)
+    start_year has to be the start year of the given array
+    if an incomplete data array along the time dimension is provided, then you have to specify the time variable
+    time variable has to be specified in days, since start_year [1,2,3,4,5,6,7], default=finds the time starting
+    from day 1
+    time_period includes 'yearly', 'seasonally', 'all' (default='yearly'); 'all' means the std_dev over all years
+    if 'seasonally' then have to set season variable to be: 'djf', 'mam', 'jja', 'son'
+
+    Output:
+    returns standard_deviation for the given time_period, and the time array that corresponds to the std_dev output
+    out_time is zero for time_period='all'
+    """
+    # convert time as numpy array
+    time = np.asarray(time)
+
+    # getting the datetime values for the time index
+    dates_month = []
+    dates_year = []
+    for i_time in time:
+        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1)
+        dates_month.append(temp_time.month)
+        dates_year.append(temp_time.year)
+
+    uni_year = sorted(set(dates_year))
+    dates_month = np.asarray(dates_month)
+    dates_year = np.asarray(dates_year)
+
+    # getting the time_ind
+    if time_period == 'all':
+        return np.nanstd(data, axis=0), 0
+    else:
+        # getting the time index
+        if time_period == 'yearly':
+            time_ind = (dates_month > 0)
+        elif time_period == 'seasonally':
+            if season == '':
+                raise Exception('Set which season you want to extract!')
+            elif season == 'djf':
+                time_ind = (dates_month == 12) | (dates_month == 1) | (dates_month == 2)
+            elif season == 'mam':
+                time_ind = (dates_month == 3) | (dates_month == 4) | (dates_month == 5)
+            elif season == 'jja':
+                time_ind = (dates_month == 6) | (dates_month == 7) | (dates_month == 8)
+            elif season == 'son':
+                time_ind = (dates_month == 9) | (dates_month == 10) | (dates_month == 11)
+        else:
+            raise Exception('Error in the time_period set!')

     # initialize output array
     out_time = np.empty((len(uni_year),))*np.nan
@@ -167,23 +183,21 @@ def old_std_dev(data, start_year, time, time_period='yearly', season=''):
     # for each year we have to get the std_dev data for
     for out_ind, year in enumerate(uni_year):
-        # setting the time array output
-        out_time[out_ind] = year
+        # setting the time array output
+        out_time[out_ind] = year

-        # getting the matching index for the each unique year
-        year_ind = (dates_year == year)
+        # getting the matching index for each unique year
+        year_ind = (dates_year == year)

-        # overlapping with the season index, or all if time_period is yearly
-        final_ind = year_ind & time_ind
+        # overlapping with the season index, or all if time_period is yearly
+        final_ind = year_ind & time_ind

-        # check if any value is true for the selected time, if so then continue, else compute standard deviation
-        if np.all(np.invert(final_ind)):
-            print ('Debug: Nothing found!')
-            breakpoint()
-            continue
-        else:
-            out_std_dev[out_ind, :, :] = np.nanstd(data[final_ind, :, :], axis=0)
+        # if nothing matches the selected time, skip this year, else compute the standard deviation
+        if np.all(np.invert(final_ind)):
+            print('Debug: Nothing found!')
+            breakpoint()
+            continue
+        else:
+            out_std_dev[out_ind, :, :] = np.nanstd(data[final_ind, :, :], axis=0)

     return out_std_dev, out_time
-
-
diff --git a/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py b/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
index 701033e66..ec11d2755 100755
--- a/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
+++ b/diagnostics/eulerian_storm_track/eulerian_storm_track_util.py
@@ -1,10 +1,10 @@
 #!/usr/bin/env python
-############# EULERIAN STROM TRACKER ############
-###### Created by: Jeyavinoth Jeyaratnam ####
-###### Created Date: 03/29/2019 ####
-###### Last Modified: 05/09/2019 ####
-#################################################
+# EULERIAN STORM TRACKER ############
+# Created by: Jeyavinoth Jeyaratnam ####
+# Created Date: 03/29/2019 ####
+# Last Modified: 05/09/2019 ####
+############################################

 # Importing standard libraries
 import numpy as np
@@ -13,157 +13,164 @@ from netCDF4 import Dataset
 import warnings

+
 def transient_eddies(daily_data):
-    '''
-    Data has to be provided as daily_data
-    it will compute the difference between the current day and the previous day
-    i.e. X(t+1) - X(t), nans for the last index
-    in Booth et al., 2017, vprime = (x(t+1) - x(t))/2.
-    '''
-    # pad the right of the array with nan values along the first dimension
-    # then extract the values from the 2nd column (index = 1) to the end
-    # this will give us a shifted array of daily data, i.e. X(t+1)
-    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
-
-    return (daily_data_shift - daily_data)/2. # X(t+1) - X(t), with nan values for the last time dimension
+    """
+    Data has to be provided as daily_data
+    it will compute the difference between the current day and the previous day
+    i.e. X(t+1) - X(t), nans for the last index
+    in Booth et al., 2017, vprime = (x(t+1) - x(t))/2.
+    """
+    # pad the right of the array with nan values along the first dimension
+    # then extract the values from the 2nd column (index = 1) to the end
+    # this will give us a shifted array of daily data, i.e. X(t+1)
+    daily_data_shift = np.pad(daily_data, ((0,1), (0,0), (0,0)), mode='constant', constant_values=np.nan)[1:, :, :]
+
+    return (daily_data_shift - daily_data)/2.  # X(t+1) - X(t), with nan values for the last time dimension
+
+
 def model_std_dev(data, start_year, time, season='djf'):
-    '''
-    Data input should be in the format (time, lat, lon)
-    We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension
-    '''
-    # convert time as numpy array
-    time = np.asarray(time)
-
-    # getting the datetime values for the time index
-    dates_month=[]
-    dates_year=[]
-    for i_time in time:
-        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=np.float(i_time)-1)
-        dates_month.append(temp_time.month)
-        dates_year.append(temp_time.year)
-
-    dates_month = np.asarray(dates_month)
-    dates_year = np.asarray(dates_year)
-
-    eddy_year = []
-    for i_year in range(int(os.environ['FIRSTYR']), int(os.environ['LASTYR'])+1):
-        if (season == 'djf'):
-            time_ind = ((dates_year == i_year) & (dates_month == 1)) | ((dates_year == i_year) & (dates_month == 2)) | ((dates_year == i_year-1) & (dates_month == 12))
-        elif (season == 'mam'):
-            time_ind = ((dates_year == i_year) & (dates_month == 3)) | ((dates_year == i_year) & (dates_month == 4)) | ((dates_year == i_year) & (dates_month == 5))
-        elif (season == 'jja'):
-            time_ind = ((dates_year == i_year) & (dates_month == 6)) | ((dates_year == i_year) & (dates_month == 7)) | ((dates_year == i_year) & (dates_month == 8))
-        elif (season == 'son'):
-            time_ind = ((dates_year == i_year) & (dates_month == 9)) | ((dates_year == i_year) & (dates_month == 10)) | ((dates_year == i_year) & (dates_month == 11))
+    """
+    Data input should be in the format (time, lat, lon)
+    We will calculate the seasonal eddy magnitude for each year between startdate and enddate,
+    then average the yearly fields
+    """
+    # convert time as numpy array
+    time = np.asarray(time)
+
+    # getting the datetime values for the time index
+    dates_month = []
+    dates_year = []
+    for i_time in time:
+        temp_time = dt.datetime(start_year, 1, 1) + dt.timedelta(days=float(i_time)-1)
+        dates_month.append(temp_time.month)
+        dates_year.append(temp_time.year)
+
+    dates_month = np.asarray(dates_month)
+    dates_year = np.asarray(dates_year)
+
+    eddy_year = []
+    for i_year in range(int(os.environ['startdate']), int(os.environ['enddate']) + 1):
+        if season == 'djf':
+            time_ind = (((dates_year == i_year) & (dates_month == 1)) | ((dates_year == i_year) & (dates_month == 2))
+                        | ((dates_year == i_year-1) & (dates_month == 12)))
+        elif season == 'mam':
+            time_ind = (((dates_year == i_year) & (dates_month == 3)) | ((dates_year == i_year) & (dates_month == 4))
+                        | ((dates_year == i_year) & (dates_month == 5)))
+        elif season == 'jja':
+            time_ind = (((dates_year == i_year) & (dates_month == 6)) | ((dates_year == i_year) & (dates_month == 7))
+                        | ((dates_year == i_year) & (dates_month == 8)))
+        elif season == 'son':
+            time_ind = (((dates_year == i_year) & (dates_month == 9)) | ((dates_year == i_year) & (dates_month == 10))
+                        | ((dates_year == i_year) & (dates_month == 11)))
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=RuntimeWarning)
+            eddy_season_mean = np.sqrt(np.nanmean(data[time_ind, :, :] ** 2, axis=0))
+            eddy_year.append(eddy_season_mean)

     with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=RuntimeWarning)
-        eddy_season_mean = np.sqrt(np.nanmean(data[time_ind, :, :] ** 2, axis=0))
-        eddy_year.append(eddy_season_mean)
+        warnings.simplefilter("ignore", category=RuntimeWarning)
+        eddy_year = np.asarray(eddy_year)
+        out_std_dev = np.nanmean(eddy_year, axis=0)
+        zonal_mean = np.nanmean(out_std_dev, axis=1)
+        zonal_mean[zonal_mean == 0] = np.nan

-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=RuntimeWarning)
-        eddy_year = np.asarray(eddy_year)
-        out_std_dev = np.nanmean(eddy_year, axis=0)
-        zonal_mean = np.nanmean(out_std_dev, axis=1)
-        zonal_mean[zonal_mean == 0] = np.nan
+
+    return out_std_dev, zonal_mean

-    return out_std_dev, zonal_mean
+
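One detail that is easy to misread in model_std_dev: the seasonal statistic is a root-mean-square of the eddy field, np.sqrt(np.nanmean(data ** 2)), not np.nanstd; the two agree only when the eddies average to exactly zero. A toy comparison (values invented):

```python
import numpy as np

vprime = np.array([1.0, -2.0, 3.0, np.nan])  # eddy series at one grid point (illustrative)

rms = np.sqrt(np.nanmean(vprime ** 2))  # the statistic model_std_dev uses per season
std = np.nanstd(vprime)                 # std dev about the sample mean

print(round(rms, 2), round(std, 2))  # 2.16 2.05
```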
 def obs_std_dev(obs_data_file, obs_topo_file):
-    '''
-    Data input should be in the format (time, lat, lon)
-    We will calculate the std_dev for the given time_ind, the time_ind has to be a logical array of the size of the time dimension
-    '''
-
-    nc = Dataset(obs_data_file, 'r')
-    nc.set_auto_mask(False)
-
-    in_lat = nc.variables['lat'][:]
-    in_lon = nc.variables['lon'][:]
-    in_time = nc.variables['time'][:]
-
-    in_jf = nc.variables['jf_sq_eddy'][:]
-    in_mam = nc.variables['mam_sq_eddy'][:]
-    in_jja = nc.variables['jja_sq_eddy'][:]
-    in_son = nc.variables['son_sq_eddy'][:]
-    in_dec = nc.variables['dec_sq_eddy'][:]
-
-    nc.close()
-
-    # read in the topography information to filter before computing the zonal mean
-    nc = Dataset(obs_topo_file, 'r')
-    in_topo = nc.variables['topo'][:]
-    nc.close()
-
-    topo_cond = (in_topo > 1000)
-
-    djf_year = []
-    mam_year = []
-    jja_year = []
-    son_year = []
-
-    start_year = int(os.environ['FIRSTYR'])
-    end_year = int(os.environ['LASTYR'])
-
-    start_year = max([start_year, min(in_time)])
-    end_year = min([end_year, max(in_time)])
-
-    for i_year in range(start_year, end_year+1):
-
-        if not ((i_year == start_year)):
-            i_djf = np.squeeze(in_dec[in_time == i_year-1, :, :, :] + in_jf[in_time == i_year, :, :, :])
-            i_djf = np.sqrt(i_djf[0, :, :]/i_djf[1, :, :])
-            djf_year.append(i_djf)
-
-        i_mam = np.squeeze(in_mam[in_time == i_year, :, :, :])
-        i_mam = np.sqrt(i_mam[0, :, :]/i_mam[1, :, :])
-        mam_year.append(i_mam)
-
-        i_jja = np.squeeze(in_jja[in_time == i_year, :, :, :])
-        i_jja = np.sqrt(i_jja[0, :, :]/i_jja[1, :, :])
-        jja_year.append(i_jja)
-
-        i_son = np.squeeze(in_son[in_time == i_year, :, :, :])
-        i_son = np.sqrt(i_son[0, :, :]/i_son[1, :, :])
-        son_year.append(i_son)
-
-    djf_year = np.asarray(djf_year)
-    djf = np.nanmean(djf_year, axis=0)
-    djf[topo_cond] = np.nan
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=RuntimeWarning)
-        zonal_djf = np.nanmean(djf, axis=1)
-    zonal_djf[zonal_djf == 0] = np.nan
+    """
+    Reads the pre-computed seasonal squared-eddy statistics from the obs data file, averages them over the
+    requested years, masks out high topography, and returns the seasonal fields together with their zonal means
+    """
+
+    nc = Dataset(obs_data_file, 'r')
+    nc.set_auto_mask(False)
+
+    in_lat = nc.variables['lat'][:]
+    in_lon = nc.variables['lon'][:]
+    in_time = nc.variables['time'][:]
+
+    in_jf = nc.variables['jf_sq_eddy'][:]
+    in_mam = nc.variables['mam_sq_eddy'][:]
+    in_jja = nc.variables['jja_sq_eddy'][:]
+    in_son = nc.variables['son_sq_eddy'][:]
+    in_dec = nc.variables['dec_sq_eddy'][:]
+
+    nc.close()
+
+    # read in the topography information to filter before computing the zonal mean
+    nc = Dataset(obs_topo_file, 'r')
+    in_topo = nc.variables['topo'][:]
+    nc.close()
+
+    topo_cond = (in_topo > 1000)
+
+    djf_year = []
+    mam_year = []
+    jja_year = []
+    son_year = []
+
+    start_year = int(os.environ['startdate'])
+    end_year = int(os.environ['enddate'])
+
+    start_year = max([start_year, min(in_time)])
+    end_year = min([end_year, max(in_time)])
+
+    for i_year in range(start_year, end_year+1):
+        if not i_year == start_year:
+            i_djf = np.squeeze(in_dec[in_time ==
i_year-1, :, :, :] + in_jf[in_time == i_year, :, :, :]) + i_djf = np.sqrt(i_djf[0, :, :]/i_djf[1, :, :]) + djf_year.append(i_djf) + + i_mam = np.squeeze(in_mam[in_time == i_year, :, :, :]) + i_mam = np.sqrt(i_mam[0, :, :]/i_mam[1, :, :]) + mam_year.append(i_mam) + + i_jja = np.squeeze(in_jja[in_time == i_year, :, :, :]) + i_jja = np.sqrt(i_jja[0, :, :]/i_jja[1, :, :]) + jja_year.append(i_jja) + + i_son = np.squeeze(in_son[in_time == i_year, :, :, :]) + i_son = np.sqrt(i_son[0, :, :]/i_son[1, :, :]) + son_year.append(i_son) + + djf_year = np.asarray(djf_year) + djf = np.nanmean(djf_year, axis=0) + djf[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_djf = np.nanmean(djf, axis=1) + zonal_djf[zonal_djf == 0] = np.nan - mam_year = np.asarray(mam_year) - mam = np.nanmean(mam_year, axis=0) - mam[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_mam = np.nanmean(mam, axis=1) - zonal_mam[zonal_mam == 0] = np.nan + mam_year = np.asarray(mam_year) + mam = np.nanmean(mam_year, axis=0) + mam[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_mam = np.nanmean(mam, axis=1) + zonal_mam[zonal_mam == 0] = np.nan - jja_year = np.asarray(jja_year) - jja = np.nanmean(jja_year, axis=0) - jja[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_jja = np.nanmean(jja, axis=1) - zonal_jja[zonal_jja == 0] = np.nan + jja_year = np.asarray(jja_year) + jja = np.nanmean(jja_year, axis=0) + jja[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_jja = np.nanmean(jja, axis=1) + zonal_jja[zonal_jja == 0] = np.nan - son_year = np.asarray(son_year) - son = np.nanmean(son_year, axis=0) - son[topo_cond] = np.nan - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=RuntimeWarning) - zonal_son = np.nanmean(son, axis=1) - zonal_son[zonal_son == 0] = np.nan + son_year = np.asarray(son_year) + son = np.nanmean(son_year, axis=0) + son[topo_cond] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + zonal_son = np.nanmean(son, axis=1) + zonal_son[zonal_son == 0] = np.nan - lonGrid, latGrid = np.meshgrid(in_lon, in_lat) - - zonal_means = {'djf': zonal_djf, 'jja': zonal_jja, 'son': zonal_son, 'mam': zonal_mam, 'lat': in_lat} + lonGrid, latGrid = np.meshgrid(in_lon, in_lat) - return latGrid, lonGrid, djf, mam, jja, son, start_year, end_year, zonal_means + zonal_means = {'djf': zonal_djf, 'jja': zonal_jja, 'son': zonal_son, 'mam': zonal_mam, 'lat': in_lat} + return latGrid, lonGrid, djf, mam, jja, son, start_year, end_year, zonal_means diff --git a/diagnostics/eulerian_storm_track/plotter.py b/diagnostics/eulerian_storm_track/plotter.py index 294fe9ad4..3d2e9f440 100755 --- a/diagnostics/eulerian_storm_track/plotter.py +++ b/diagnostics/eulerian_storm_track/plotter.py @@ -6,90 +6,90 @@ from cartopy.util import add_cyclic_point import numpy as np -def plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file=''): - - plt.close('all') - - plt.figure(figsize=(8,12)) - plt.subplot(2,2,1) - plt.plot(model_zonal_means['djf'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['djf'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - 
plt.plot(era5_zonal_means['djf'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('DJF') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - - plt.subplot(2,2,2) - plt.plot(model_zonal_means['jja'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['jja'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['jja'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('JJA') - plt.legend(loc=0) - plt.ylim(-80, 80) - - plt.subplot(2,2,3) - plt.plot(model_zonal_means['mam'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['mam'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['mam'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('MAM') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - plt.subplot(2,2,4) - plt.plot(model_zonal_means['son'], model_zonal_means['lat'], color='r', label='Model', ls='--') - plt.plot(erai_zonal_means['son'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') - plt.plot(era5_zonal_means['son'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') - plt.title('SON') - plt.legend(loc=0) - plt.ylim(-80, 80) - plt.ylabel('Latitude') - plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - plt.tight_layout() - if (len(out_file) > 0): - if (out_file.endswith('.ps')): - plt.savefig(out_file, format='eps', dpi=300.) - plt.close('all') - elif (out_file.endswith('.png')): - plt.savefig(out_file, format='png', dpi=300.) - plt.close('all') - - -def plot(lonGrid, latGrid, data, show=False, out_file='', title='', **kwargs): - - plt.close('all') - - plt.figure() - - # adding cyclic point - # provided the values are given as lat x lon - lons = lonGrid[0,:] - lats = latGrid[:,0] - - new_data, new_lons = add_cyclic_point(data, coord=lons) - new_lonGrid, new_latGrid = np.meshgrid(new_lons, lats) - - ax = plt.axes(projection=cartopy.crs.PlateCarree()) - ax.coastlines() - # getting rid of the line due to lack of continuity - _ = plt.contourf(new_lonGrid, new_latGrid, new_data, cmap='jet', **kwargs) - cb = plt.colorbar(ax=ax, shrink=0.5) - cb.ax.set_ylabel(r'$\tilde{V}^{st}_{850}$ [m/s]') - - if (len(title) > 0): - plt.title(title) - - if (show): - plt.show() - - if (len(out_file) > 0): - if (out_file.endswith('.ps')): - plt.savefig(out_file, format='eps', dpi=300.) - plt.close('all') - elif (out_file.endswith('.png')): - plt.savefig(out_file, format='png', dpi=300.) 
- plt.close('all') +def plot_zonal(model_zonal_means, erai_zonal_means, era5_zonal_means, out_file: str = ''): + + plt.close('all') + + plt.figure(figsize=(8,12)) + plt.subplot(2,2,1) + plt.plot(model_zonal_means['djf'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['djf'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['djf'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('DJF') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + + plt.subplot(2,2,2) + plt.plot(model_zonal_means['jja'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['jja'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['jja'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('JJA') + plt.legend(loc=0) + plt.ylim(-80, 80) + + plt.subplot(2, 2, 3) + plt.plot(model_zonal_means['mam'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['mam'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['mam'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('MAM') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + plt.subplot(2,2,4) + plt.plot(model_zonal_means['son'], model_zonal_means['lat'], color='r', label='Model', ls='--') + plt.plot(erai_zonal_means['son'], erai_zonal_means['lat'], color='b', label='ERA-Interim', ls='--') + plt.plot(era5_zonal_means['son'], era5_zonal_means['lat'], color='g', label='ERA-5', ls='--') + plt.title('SON') + plt.legend(loc=0) + plt.ylim(-80, 80) + plt.ylabel('Latitude') + plt.xlabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + plt.tight_layout() + if len(out_file) > 0: + if out_file.endswith('.ps'): + plt.savefig(out_file, format='eps', dpi=300.) + plt.close('all') + elif out_file.endswith('.png'): + plt.savefig(out_file, format='png', dpi=300.) + plt.close('all') + + +def plot(lonGrid, latGrid, data, show: bool = False, out_file: str = '', title: str ='', **kwargs): + + plt.close('all') + + plt.figure() + + # adding cyclic point + # provided the values are given as lat x lon + lons = lonGrid[0,:] + lats = latGrid[:,0] + + new_data, new_lons = add_cyclic_point(data, coord=lons) + new_lonGrid, new_latGrid = np.meshgrid(new_lons, lats) + + ax = plt.axes(projection=cartopy.crs.PlateCarree()) + ax.coastlines() + # getting rid of the line due to lack of continuity + _ = plt.contourf(new_lonGrid, new_latGrid, new_data, cmap='jet', **kwargs) + cb = plt.colorbar(ax=ax, shrink=0.5) + cb.ax.set_ylabel(r'$\tilde{V}^{st}_{850}$ [m/s]') + + if len(title) > 0: + plt.title(title) + + if show: + plt.show() + + if len(out_file) > 0: + if out_file.endswith('.ps'): + plt.savefig(out_file, format='eps', dpi=300.) + plt.close('all') + elif out_file.endswith('.png'): + plt.savefig(out_file, format='png', dpi=300.) 
+ plt.close('all') diff --git a/diagnostics/eulerian_storm_track/settings.jsonc b/diagnostics/eulerian_storm_track/settings.jsonc index 75739cd65..a443b9964 100755 --- a/diagnostics/eulerian_storm_track/settings.jsonc +++ b/diagnostics/eulerian_storm_track/settings.jsonc @@ -2,7 +2,7 @@ "settings" : { "driver" : "eulerian_storm_track.py", "long_name" : "Eulerian Storm Track", - "realm" : "atmos", + "convention": "cmip", "description" : "Eulerian Storm Track", "pod_env_vars" : { "lat_var": "lat", @@ -13,8 +13,16 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"}, "lev": { "standard_name": "air_pressure", @@ -29,6 +37,7 @@ "varlist" : { "v850": { "standard_name" : "northward_wind", + "realm": "atmos", "units": "m s-1", "dimensions": ["time", "lat", "lon"], "use_exact_name": true, diff --git a/diagnostics/example/example.html b/diagnostics/example/example.html index 9822edece..050ca8838 100644 --- a/diagnostics/example/example.html +++ b/diagnostics/example/example.html @@ -28,7 +28,7 @@

Example diagnostic: time-averaged near-surface temperature

- diff --git a/diagnostics/example/example_diag.py b/diagnostics/example/example_diag.py index 9d1c38a34..579cf8618 100644 --- a/diagnostics/example/example_diag.py +++ b/diagnostics/example/example_diag.py @@ -68,15 +68,15 @@ # import os import matplotlib -matplotlib.use('Agg') # non-X windows backend +matplotlib.use('Agg') # non-X windows backend # Commands to load third-party libraries. Any code you don't include that's # not part of your language's standard library should be listed in the # settings.jsonc file. import xarray as xr # python library we use to read netcdf files import matplotlib.pyplot as plt # python library we use to make plots +import sys - -### 1) Loading model data files: ############################################### +# 1) Loading model data files: ############################################### # # The framework copies model data to a regular directory structure of the form # //...nc @@ -88,12 +88,12 @@ # script know where the locally downloaded copy of the data for this variable # (which we called "tas") is. input_path = os.environ["TAS_FILE"] +print('TAS_FILE is:', input_path) # command to load the netcdf file model_dataset = xr.open_dataset(input_path) - -### 2) Doing computations: ##################################################### +# 2) Doing computations: ##################################################### # # Diagnostics in the framework are intended to work with native output from a # variety of models. For this reason, variable names should not be hard-coded @@ -106,7 +106,7 @@ # The only computation done here: compute the time average of input data tas_data = model_dataset[tas_var_name] -model_mean_tas = tas_data.mean(dim = time_coord_name) +model_mean_tas = tas_data.mean(dim=time_coord_name) # Note that we supplied the observational data as time averages, to save space # and avoid having to repeat that calculation each time the diagnostic is run. @@ -114,22 +114,23 @@ # your diagnostic prints to STDOUT will be saved to its own log file. print("Computed time average of {tas_var} for {CASENAME}.".format(**os.environ)) - -### 3) Saving output data: ##################################################### +# 3) Saving output data: ##################################################### # # Diagnostics should write output data to disk to a) make relevant results # available to the user for further use or b) to pass large amounts of data # between stages of a calculation run as different sub-scripts. Data can be in # any format (as long as it's documented) and should be written to the -# directory /model/netCDF (created by the framework). +# directory /model/netCDF (created by the framework). # -out_path = "{WK_DIR}/model/netCDF/temp_means.nc".format(**os.environ) +WORK_DIR = os.environ['WORK_DIR'] +out_dir = os.path.join(WORK_DIR, "model") +assert os.path.isdir(out_dir), f'{out_dir} not found' +out_path = os.path.join(out_dir, "temp_means.nc") # write out time averages as a netcdf file model_mean_tas.to_netcdf(out_path) - -### 4) Saving output plots: #################################################### +# 4) Saving output plots: #################################################### # # Plots should be saved in EPS or PS format at //PS # (created by the framework). Plots can be given any filename, but should have @@ -138,27 +139,30 @@ # Define a python function to make the plot, since we'll be doing it twice and # we don't want to repeat ourselves. 
+ + def plot_and_save_figure(model_or_obs, title_string, dataset): # initialize the plot - plt.figure(figsize=(12,6)) - plot_axes = plt.subplot(1,1,1) + plt.figure(figsize=(12, 6)) + plot_axes = plt.subplot(1, 1, 1) # actually plot the data (makes a lat-lon colormap) - dataset.plot(ax = plot_axes) + dataset.plot(ax=plot_axes) plot_axes.set_title(title_string) # save the plot in the right location - plot_path = "{WK_DIR}/{model_or_obs}/PS/example_{model_or_obs}_plot.eps".format( + plot_path = "{WORK_DIR}/{model_or_obs}/PS/example_{model_or_obs}_plot.eps".format( model_or_obs=model_or_obs, **os.environ ) plt.savefig(plot_path, bbox_inches='tight') # end of function # set an informative title using info about the analysis set in env vars -title_string = "{CASENAME}: mean {tas_var} ({FIRSTYR}-{LASTYR})".format(**os.environ) + + +title_string = "{CASENAME}: mean {tas_var} ({startdate}-{enddate})".format(**os.environ) # Plot the model data: plot_and_save_figure("model", title_string, model_mean_tas) - -### 5) Loading obs data files & plotting obs figures: ########################## +# 5) Loading obs data files & plotting obs figures: ########################## # # If your diagnostic uses any model-independent supporting data (eg. reference # or observational data) larger than a few kB of text, it should be provided via @@ -170,7 +174,7 @@ def plot_and_save_figure(model_or_obs, title_string, dataset): # The following command replaces the substring "{OBS_DATA}" with the value of # the OBS_DATA environment variable. input_path = "{OBS_DATA}/example_tas_means.nc".format(**os.environ) - +print(input_path) # command to load the netcdf file obs_dataset = xr.open_dataset(input_path) obs_mean_tas = obs_dataset['mean_tas'] @@ -180,7 +184,7 @@ def plot_and_save_figure(model_or_obs, title_string, dataset): plot_and_save_figure("obs", title_string, obs_mean_tas) -### 6) Cleaning up: ############################################################ +# 6) Cleaning up: ############################################################ # # In addition to your language's normal housekeeping, don't forget to delete any # temporary/scratch files you created in step 4). @@ -188,8 +192,7 @@ def plot_and_save_figure(model_or_obs, title_string, dataset): model_dataset.close() obs_dataset.close() - -### 7) Error/Exception-Handling Example ######################################## +# 7) Error/Exception-Handling Example ######################################## nonexistent_file_path = "{DATADIR}/mon/nonexistent_file.nc".format(**os.environ) try: nonexistent_dataset = xr.open_dataset(nonexistent_file_path) @@ -197,6 +200,6 @@ def plot_and_save_figure(model_or_obs, title_string, dataset): print(error) print("This message is printed by the example POD because exception-handling is working!") - -### 8) Confirm POD executed sucessfully ######################################## +# 8) Confirm POD executed successfully ######################################## print("Last log message by Example POD: finished successfully!") +sys.exit(0) diff --git a/diagnostics/example/settings.jsonc b/diagnostics/example/settings.jsonc index ca4144d4d..dbdcf044a 100644 --- a/diagnostics/example/settings.jsonc +++ b/diagnostics/example/settings.jsonc @@ -15,9 +15,8 @@ // Human-readable name of the diagnostic. May contain spaces. "long_name" : "Example diagnostic", - // Modeling realm. If your diagnostic uses data from multiple realms, give - // this as a list. 
- "realm" : "atmos", + // Data convention expected by the POD: cmip (default), cesm, or gfdl + "convention": "cmip", // Human-readable name of the diagnostic. May contain spaces. This // is used to describe your diagnostic on the top-level index.html page. @@ -43,10 +42,19 @@ // In this section, you define the names and units for the coordinates used by // the variables defined below in the "varlist" section. Names in the // "dimensions" attribute for each variable must correspond to a coordinate - // named here. + // named here. Units must be specified for horizontal and vertical coordinates. + // If a dimension is unitless, set "units" to "1". "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"} }, @@ -62,6 +70,9 @@ // name in the output format of the model being analyzed. "standard_name" : "air_temperature", + // modeling realm of the variable: atmos, ocean, land, seaIce, landIce, etc... + "realm": "atmos", + // Units the script expects this variable to be in (UDUnits2- compatible // string). If the model being analyzed provides the variable in different // units, the MDTF package will do unit conversion before calling the script. @@ -69,7 +80,7 @@ // Time frequency the data should be sampled at. Currently recognized // values are '1hr', '3hr', '6hr', 'day' and 'mon'. - "frequency" : "mon", + "frequency" : "day", // Coordinates of the variable (defined in the section above.) "dimensions": ["time", "lat", "lon"], diff --git a/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.csv b/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.csv new file mode 100644 index 000000000..39e83f741 --- /dev/null +++ b/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.csv @@ -0,0 +1,786 @@ +activity_id,institution_id,source_id,experiment_id,frequency,modeling_realm,table_id,member_id,grid_label,variable_id,temporal_subset,chunk_freq,grid_label,platform,dimensions,cell_methods,path +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0002010100-0002123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0002010100-0002123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0003010100-0003123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0003010100-0003123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0004010100-0004123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0004010100-0004123123.rlut.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0005010100-0005123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0005010100-0005123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0006010100-0006123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0006010100-0006123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0007010100-0007123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0007010100-0007123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0008010100-0008123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0008010100-0008123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0009010100-0009123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0009010100-0009123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0010010100-0010123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0010010100-0010123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0011010100-0011123123.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,3hr,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/3hr/1yr/atmos_cmip.0011010100-0011123123.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.hurs.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.tasmax.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.tasmin.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.ua10.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.ua850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.tasmax.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.ua200.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.tasmin.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.va850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.ua10.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.ua200.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.zg500.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.ua850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.tasmin.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.va850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.tasmax.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00020101-00021231.zg500.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.ua10.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.ua200.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.huss.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.ua850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.va850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.tasmax.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.zg500.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.tasmin.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.ua10.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.ua200.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.ua850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.tasmax.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.va850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.tasmin.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00030101-00031231.zg500.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.ua10.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.ua200.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.ua850.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.pr.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00040101-00041231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00050101-00051231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00060101-00061231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.tasmax.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.tasmin.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.ua10.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.ua200.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.ua850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00070101-00071231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.tasmax.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.tasmin.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.ua10.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.ua200.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.ua850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00080101-00081231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.tasmax.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.tasmin.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.ua10.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.ua200.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.ua850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00090101-00091231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.tasmax.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.tasmin.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.ua10.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.ua200.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.ua850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00100101-00101231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.tasmax.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.tasmin.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.ua10.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.ua200.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.ua850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.va850.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.00110101-00111231.zg500.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.albs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.va_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hur.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.zg_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.albs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.o3.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hur.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ta.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ua.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsdt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.utendnogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.utendogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.o3.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.va.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ta.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.wap.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ua.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.zg.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.utendogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ccb.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.va.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.cct.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.wap.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsuscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.clivi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.zg.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.clt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.clwvi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.evspsbl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ccb.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hfls.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.cct.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hfss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hur_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.clivi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rtmt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.hus_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.tauv.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.o3_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.clt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.prc.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.clwvi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.evspsbl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.prsn.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hfls.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.prw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hfss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ps.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.tauu.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rldscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rlds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.o3_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rlus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ts.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rlutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.prc.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rlut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.rsdscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.sci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ua_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.ta_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000201-000212.wap_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.utendnogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hur_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.hus_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.prsn.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.prw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ps.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rldscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rlds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rlus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rlutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rlut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsdscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsdt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsuscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rsut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.rtmt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.sci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ta_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.tauu.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.tauv.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ts.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.ua_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.va_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.wap_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000301-000312.zg_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.albs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hur.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.o3.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ta.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ua.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.utendnogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.utendogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.va.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.wap.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.zg.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ccb.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.cct.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.clivi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.clt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.clwvi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.evspsbl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hfls.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hfss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hurs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hur_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.huss.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.hus_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.o3_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.prc.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.pr.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.prsn.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.prw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.psl.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ps.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rldscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rlds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rlus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rlutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rlut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsdscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsds.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsdt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsuscs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsutcs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rsut.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.rtmt.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.sci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.sfcWind.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.tas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ta_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.tauu.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.tauv.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ts.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.uas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.ua_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.vas.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.va_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.wap_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000401-000412.zg_unmsk.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.albs.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hur.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hus.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.o3.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ta.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ua.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.utendnogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.utendogw.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.va.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.wap.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.zg.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ccb.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.cct.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ci.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.clivi.nc
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ps.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.tas.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000501-000512.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hur.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.o3.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ua.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.va.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.zg.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.cct.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hur_unmsk.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsds.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.vas.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000601-000612.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hur.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.o3.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ua.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.va.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.zg.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.cct.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.prw.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.sci.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000701-000712.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hur.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.o3.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ua.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.va.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.zg.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.cct.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hfss.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rlut.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.uas.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000801-000812.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hur.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.o3.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ua.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.va.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.zg.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.cct.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.pr.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rsut.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.000901-000912.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hur.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.o3.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ua.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.va.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.zg.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.cct.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.evspsbl.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.o3_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rlus.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.tauv.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.wap_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001001-001012.zg_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,albs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.albs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hur.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.o3.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ta.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ua.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendnogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.utendnogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,utendogw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.utendogw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.va.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.wap.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.zg.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ccb,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ccb.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,cct,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.cct.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clivi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.clivi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.clt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,clwvi,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.clwvi.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,evspsbl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.evspsbl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfls,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hfls.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hfss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hfss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hurs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hur_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hur_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.huss.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,hus_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.hus_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,o3_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.o3_unmsk.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prc,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.prc.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.pr.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prsn,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.prsn.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,prw,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.prw.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.psl.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ps,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ps.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rldscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rldscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rlds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rlus.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rlutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rlut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rlut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsdscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsds,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsds.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsdt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsdt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsuscs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsuscs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsus,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsus.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsutcs,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsutcs.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rsut,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rsut.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,rtmt,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.rtmt.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sci,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.sci.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.sfcWind.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.tas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ta_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ta_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauu,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.tauu.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,tauv,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.tauv.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ts,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ts.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.uas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,ua_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.ua_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.vas.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,va_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.va_unmsk.nc +dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,wap_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.wap_unmsk.nc 
+dev,,,c96L65_am5f3b1r0_pdclim1850F,monthly,atmos_cmip,,n/a,,zg_unmsk,,,,,,,/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/monthly/1yr/atmos_cmip.001101-001112.zg_unmsk.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.sfcWind.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.hurs.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.tasmax.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.huss.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.tasmin.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.pr.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.tas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.psl.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.ua10.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.sfcWind.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.ua850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.tasmax.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.ua200.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.tasmin.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.uas.nc 
+dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.tas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.va850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.ua10.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.vas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.ua200.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.zg500.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.ua850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.uas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.va850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.vas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19800101-19801231.zg500.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.hurs.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.huss.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.pr.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.psl.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,sfcWind,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.sfcWind.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmax,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.tasmax.nc 
+dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tasmin,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.tasmin.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.tas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua10,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.ua10.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua200,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.ua200.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,ua850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.ua850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,uas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.uas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,va850,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.va850.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,vas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.vas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,zg500,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.zg500.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,hurs,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.hurs.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,huss,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.huss.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,pr,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.pr.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,psl,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.psl.nc diff --git a/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json b/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json new file mode 100644 index 000000000..715273cbe --- /dev/null +++ b/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json @@ -0,0 +1,111 @@ +{ + "esmcat_version": "0.0.1", + "attributes": [ + { + "column_name": "activity_id", + "vocabulary": "" + }, + { + "column_name": "institution_id", + "vocabulary": "" + }, + { + "column_name": "source_id", + "vocabulary": "" + }, + { + "column_name": "experiment_id", + "vocabulary": "" + }, + { + "column_name": "frequency", + "vocabulary": "" + }, + { + "column_name": "modeling_realm", + "vocabulary": "" + }, + { + "column_name": "table_id", + "vocabulary": "" + }, + { + "column_name": "member_id", + 
"vocabulary": "" + }, + { + "column_name": "grid_label", + "vocabulary": "" + }, + { + "column_name": "variable_id", + "vocabulary": "" + }, + { + "column_name": "temporal_subset", + "vocabulary": "" + }, + { + "column_name": "chunk_freq", + "vocabulary": "" + }, + { + "column_name": "grid_label", + "vocabulary": "" + }, + { + "column_name":"platform", + "vocabulary": "" + }, + { + "column_name": "platform", + "vocabulary": "" + }, + { + "column_name": "cell_methods", + "vocabulary": "" + }, + { + "column_name": "path", + "vocabulary": "" + } + ], + "assets": { + "column_name": "path", + "format": "netcdf", + "format_column_name": null + }, + "aggregation_control": { + "variable_column_name": "variable_id", + "groupby_attrs": [ + "source_id", + "experiment_id", + "frequency", + "member_id", + "modeling_realm", + "variable_id", + "chunk_freq" + ], + "aggregations": [ + { + "type": "union", + "attribute_name": "variable_id", + "options": {} + }, + { + "type": "join_existing", + "attribute_name": "temporal_subset", + "options": { + "dim": "time", + "coords": "minimal", + "compat": "override" + } + } + ] + }, + "id": "esm_catalog_ESM4", + "description": null, + "title": null, + "last_updated": "2023-05-07T16:35:52Z", + "catalog_file": "/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.csv" +} diff --git a/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.csv b/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.csv new file mode 100644 index 000000000..835a388a8 --- /dev/null +++ b/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.csv @@ -0,0 +1,3 @@ +activity_id,institution_id,source_id,experiment_id,frequency,modeling_realm,table_id,member_id,grid_label,variable_id,temporal_subset,chunk_freq,grid_label,platform,dimensions,cell_methods,path +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.tas.nc +dev,,,c384L65_am5f3b1r0_amip,daily,atmos_cmip,,n/a,,tas,,,,,,,/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.tas.nc diff --git a/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json b/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json new file mode 100644 index 000000000..4e06754c7 --- /dev/null +++ b/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json @@ -0,0 +1,111 @@ +{ + "esmcat_version": "0.0.1", + "attributes": [ + { + "column_name": "activity_id", + "vocabulary": "" + }, + { + "column_name": "institution_id", + "vocabulary": "" + }, + { + "column_name": "source_id", + "vocabulary": "" + }, + { + "column_name": "experiment_id", + "vocabulary": "" + }, + { + "column_name": "frequency", + "vocabulary": "" + }, + { + "column_name": "modeling_realm", + "vocabulary": "" + }, + { + "column_name": "table_id", + "vocabulary": "" + }, + { + "column_name": "member_id", + "vocabulary": "" + }, + { + "column_name": "grid_label", + "vocabulary": "" + }, + { + "column_name": "variable_id", + "vocabulary": "" + }, + { + "column_name": "temporal_subset", + "vocabulary": "" + }, + { + "column_name": "chunk_freq", + "vocabulary": "" + }, + { + "column_name": "grid_label", + "vocabulary": "" + }, + { + "column_name":"platform", + "vocabulary": "" + }, + { + "column_name": "platform", + "vocabulary": "" + }, + { + "column_name": "cell_methods", + "vocabulary": "" + }, + { + "column_name": 
"path", + "vocabulary": "" + } + ], + "assets": { + "column_name": "path", + "format": "netcdf", + "format_column_name": null + }, + "aggregation_control": { + "variable_column_name": "variable_id", + "groupby_attrs": [ + "source_id", + "experiment_id", + "frequency", + "member_id", + "modeling_realm", + "variable_id", + "chunk_freq" + ], + "aggregations": [ + { + "type": "union", + "attribute_name": "variable_id", + "options": {} + }, + { + "type": "join_existing", + "attribute_name": "temporal_subset", + "options": { + "dim": "time", + "coords": "minimal", + "compat": "override" + } + } + ] + }, + "id": "esm_catalog_ESM4", + "description": null, + "title": null, + "last_updated": "2023-05-07T16:35:52Z", + "catalog_file": "/home/a1r/github/MDTF-diagnostics/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.csv" +} diff --git a/diagnostics/example_multicase/case_info.yaml b/diagnostics/example_multicase/case_info.yaml deleted file mode 100644 index 365f590d5..000000000 --- a/diagnostics/example_multicase/case_info.yaml +++ /dev/null @@ -1,32 +0,0 @@ -CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231: - CASENAME: CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231 - FIRSTYR: 1980 - LASTYR: 1984 - MPLBACKEND: Agg - RGB: /home/jessica.liptak/mdtf/MDTF-diagnostics/shared/rgb - TAS_ASSOC_FILES: '' - TAS_FILE: /home/jessica.liptak/mdtf/wkdir/example_multicase/MDTF_CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231_1980_1984/day/CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231.tas.day.nc - date_int_offset: 0 - lat_bnds: lat_bnds - lat_coord: lat - lon_bnds: lon_bnds - lon_coord: lon - tas_var: tas - time_bnds: time_bnds - time_coord: time -CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231: - CASENAME: CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231 - FIRSTYR: 1985 - LASTYR: 1989 - MPLBACKEND: Agg - RGB: /home/jessica.liptak/mdtf/MDTF-diagnostics/shared/rgb - TAS_ASSOC_FILES: '' - TAS_FILE: /home/jessica.liptak/mdtf/wkdir/example_multicase/MDTF_CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231_1985_1989/day/CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231.tas.day.nc - date_int_offset: 0 - lat_bnds: lat_bnds - lat_coord: lat - lon_bnds: lon_bnds - lon_coord: lon - tas_var: tas - time_bnds: time_bnds - time_coord: time diff --git a/diagnostics/example_multicase/case_info_demo.yml b/diagnostics/example_multicase/case_info_demo.yml new file mode 100644 index 000000000..00b36349c --- /dev/null +++ b/diagnostics/example_multicase/case_info_demo.yml @@ -0,0 +1,30 @@ +CATALOG_FILE: "/home/a1r/github/MDTF-diagnostics/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json" +CASE_LIST: + c384L65_am5f3b1r0_amip: + case_name: c384L65_am5f3b1r0_amip: + startdate: '19810101' + enddate: '19821231' + TAS_ASSOC_FILES: '' + TAS_FILE: /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-198121231.tas.nc + date_int_offset: 0 + lat_bnds: lat_bnds + lat_coord: lat + lon_bnds: lon_bnds + lon_coord: lon + tas_var: tas + time_bnds: time_bnds + time_coord: time + c384L65_am5f3b1r0_amip: + case_name: c384L65_am5f3b1r0_amip + startdate: '19810101' + enddate: '19821231' + TAS_ASSOC_FILES: '' + TAS_FILE: /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19820101-19821231.tas.nc + date_int_offset: 0 + lat_bnds: lat_bnds + lat_coord: lat + lon_bnds: lon_bnds + lon_coord: lon + tas_var: tas + time_bnds: time_bnds + time_coord: time diff --git a/diagnostics/example_multicase/case_info_demo2.yml 
b/diagnostics/example_multicase/case_info_demo2.yml new file mode 100644 index 000000000..fbd3425aa --- /dev/null +++ b/diagnostics/example_multicase/case_info_demo2.yml @@ -0,0 +1,30 @@ +CATALOG_FILE: "/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json" +CASE_LIST: + c384L65_am5f3b1r0_amip: + case_name: c384L65_am5f3b1r0_amip + startdate: '19810101' + enddate: '19821231' + TAS_ASSOC_FILES: '' + TAS_FILE: /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_amip/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.19810101-19811231.tas.nc + date_int_offset: 0 + lat_bnds: lat_bnds + lat_coord: lat + lon_bnds: lon_bnds + lon_coord: lon + tas_var: tas + time_bnds: time_bnds + time_coord: time + amip_c96L65_am5f3b1r0_pdclim1850F: + case_name: amip_c96L65_am5f3b1r0_pdclim1850F + startdate: '00050101' + enddate: '00061231' + TAS_ASSOC_FILES: '' + TAS_FILE: /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pdclim1850F/gfdl.ncrc5-deploy-prod-openmp/pp/atmos_cmip/ts/daily/1yr/atmos_cmip.0005010100-0005123123.tas.nc + date_int_offset: 0 + lat_bnds: lat_bnds + lat_coord: lat + lon_bnds: lon_bnds + lon_coord: lon + tas_var: tas + time_bnds: time_bnds + time_coord: time diff --git a/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv b/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv new file mode 100644 index 000000000..cfc4e04a4 --- /dev/null +++ b/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv @@ -0,0 +1,3 @@ +activity_id,branch_method,branch_time_in_child,branch_time_in_parent,experiment,experiment_id,frequency,grid,grid_label,institution_id,nominal_resolution,parent_activity_id,parent_experiment_id,parent_source_id,parent_time_units,parent_variant_label,product,realm,source_id,source_type,sub_experiment,sub_experiment_id,table_id,variable_id,variant_label,member_id,standard_name,long_name,units,vertical_levels,init_year,start_time,end_time,time_range,path,version +CMIP,standard,,,,synthetic,day,,gr,,,CMIP,,,days since 1980-01-01,r1i1p1f1,,atmos,,,none,none,day,tas,r1i1p1f1,r1i1p1f1,air_temperature,Near-Surface Air Temperature,K,1,,1980-01-01,1984-12-31,1980-01-01-1984-12-31,/net/jml/mdtf/inputdata/mdtf_test_data/CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231/day/CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231.tas.day.nc,none +CMIP,standard,,,,synthetic,day,,gr,,,CMIP,,,days since 1985-01-01,r1i1p1f1,,atmos,,,none,none,day,tas,r1i1p1f1,r1i1p1f1,air_temperature,Near-Surface Air Temperature,K,1,,1985-01-01,1989-12-31,1985-01-01-1989-12-31,/net/jml/mdtf/inputdata/mdtf_test_data/CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231/day/CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231.tas.day.nc,none diff --git a/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json b/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json new file mode 100644 index 000000000..447435af8 --- /dev/null +++ b/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json @@ -0,0 +1,195 @@ +{ + "esmcat_version": "0.0.1", + "attributes": [ + { + "column_name": "activity_id", + "vocabulary": "" + }, + { + "column_name": "branch_method", + "vocabulary": "" + }, + { + "column_name": "branch_time_in_child", + "vocabulary": "" + }, + { + "column_name": "branch_time_in_parent", + "vocabulary": "" + }, + { + "column_name": "experiment", + "vocabulary": "" + }, + { + "column_name": "experiment_id", + "vocabulary": "" + }, + { + "column_name":
"frequency", + "vocabulary": "" + }, + { + "column_name": "grid", + "vocabulary": "" + }, + { + "column_name": "grid_label", + "vocabulary": "" + }, + { + "column_name": "institution_id", + "vocabulary": "" + }, + { + "column_name": "nominal_resolution", + "vocabulary": "" + }, + { + "column_name": "parent_activity_id", + "vocabulary": "" + }, + { + "column_name": "parent_experiment_id", + "vocabulary": "" + }, + { + "column_name": "parent_source_id", + "vocabulary": "" + }, + { + "column_name": "parent_time_units", + "vocabulary": "" + }, + { + "column_name": "parent_variant_label", + "vocabulary": "" + }, + { + "column_name": "product", + "vocabulary": "" + }, + { + "column_name": "realm", + "vocabulary": "" + }, + { + "column_name": "source_id", + "vocabulary": "" + }, + { + "column_name": "source_type", + "vocabulary": "" + }, + { + "column_name": "sub_experiment", + "vocabulary": "" + }, + { + "column_name": "sub_experiment_id", + "vocabulary": "" + }, + { + "column_name": "table_id", + "vocabulary": "" + }, + { + "column_name": "variable_id", + "vocabulary": "" + }, + { + "column_name": "variant_label", + "vocabulary": "" + }, + { + "column_name": "member_id", + "vocabulary": "" + }, + { + "column_name": "standard_name", + "vocabulary": "" + }, + { + "column_name": "long_name", + "vocabulary": "" + }, + { + "column_name": "units", + "vocabulary": "" + }, + { + "column_name": "vertical_levels", + "vocabulary": "" + }, + { + "column_name": "init_year", + "vocabulary": "" + }, + { + "column_name": "start_time", + "vocabulary": "" + }, + { + "column_name": "end_time", + "vocabulary": "" + }, + { + "column_name": "time_range", + "vocabulary": "" + }, + { + "column_name": "path", + "vocabulary": "" + }, + { + "column_name": "version", + "vocabulary": "" + } + ], + "assets": { + "column_name": "path", + "format": "netcdf", + "format_column_name": null + }, + "aggregation_control": { + "variable_column_name": "variable_id", + "groupby_attrs": [ + "activity_id", + "institution_id", + "source_id", + "experiment_id", + "frequency", + "member_id", + "table_id", + "grid_label", + "realm", + "variant_label", + "time_range" + ], + "aggregations": [ + { + "type": "union", + "attribute_name": "variable_id", + "options": {} + }, + { + "type": "join_existing", + "attribute_name": "time_range", + "options": { + "dim": "time", + "coords": "minimal", + "compat": "override" + } + } + ] + }, + "id": "esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv", + "description": null, + "title": null, + "last_updated": "2023-06-01", +<<<<<<< Updated upstream + "catalog_file": "file:/Users/jess/mdtf/MDTF-diagnostics/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv" +======= + "catalog_file": "file:/net/jml/mdtf/MDTF-diagnostics/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.csv" +>>>>>>> Stashed changes +} \ No newline at end of file diff --git a/diagnostics/example_multicase/example_case_info_output.yml b/diagnostics/example_multicase/example_case_info_output.yml new file mode 100644 index 000000000..336bb8de4 --- /dev/null +++ b/diagnostics/example_multicase/example_case_info_output.yml @@ -0,0 +1,24 @@ +CASE_LIST: + CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231: + TAS_ASSOC_FILES: '' + TAS_FILE: /home/mdtf/wkdir/MDTF_output/MDTF_CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231_19800101120000_19841231000000/day/CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231.tas.day.nc + case_name: CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231 + convention: CMIP + enddate: '19841231000000' + 
lat_coord: lat + lon_coord: lon + startdate: '19800101120000' + tas_var: tas + time_coord: time + CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231: + TAS_ASSOC_FILES: '' + TAS_FILE: /home/mdtf/wkdir/MDTF_output/MDTF_CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231/day/CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231.tas.day.nc + case_name: CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231 + convention: CMIP + enddate: '19891231' + lat_coord: lat + lon_coord: lon + startdate: '19850101' + tas_var: tas + time_coord: time +CATALOG_FILE: /home/mdtf/wkdir/MDTF_output/MDTF_postprocessed_data.json diff --git a/diagnostics/example_multicase/example_multicase.html b/diagnostics/example_multicase/example_multicase.html index 76825f3f3..0906ff136 100644 --- a/diagnostics/example_multicase/example_multicase.html +++ b/diagnostics/example_multicase/example_multicase.html @@ -1,11 +1,13 @@ -MDTF example diagnostic +MDTF example-multicase diagnostic

Multi-Case Example Diagnostic: zonal-average near-surface temperature anomaly

This POD illustrates how multiple cases (experiments) can be analyzed together. - The framework initializes and processes each case, writes the environment variables for the cases - to a yaml file (case_info.yaml). The example_multicase POD reads information from the yaml file - into a dictionary that it references to ingest data from the output files for each ease. + The MDTF-diagnostics framework initializes and processes each case, writes the environment variables for the cases + to a yaml file (case_info.yml), and exports an ESM Intake catalog with information about the post-processed + data for each case to the working directory (WORK_DIR). The example_multicase POD driver script reads + environment information from case_info.yml into a dictionary that it references to read data from the + post-processed files in the data catalog.
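For orientation, the flow described above amounts to the following minimal sketch. It is not part of the patch; it assumes the case_info.yml layout and the ESM intake catalog that the framework writes out, and the "tas"/"day" query values and the case_env_file environment variable follow the POD code later in this diff:

    import os
    import intake
    import yaml

    # read the per-case environment variables exported by the framework
    with open(os.environ["case_env_file"], "r") as stream:
        case_info = yaml.safe_load(stream)

    # open the catalog and subset it by variable and output frequency
    cat = intake.open_esm_datastore(case_info["CATALOG_FILE"])
    tas_subset = cat.search(variable_id="tas", frequency="day")

    # one xarray dataset per case/aggregation group
    tas_dict = tas_subset.to_dataset_dict(
        xarray_open_kwargs={"decode_times": True, "use_cftime": True}
    )

This is the same pattern implemented in the example_multicase.py changes further down.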

The example_multicase POD reads near-surface air temperature (TAS) from netcdf output files for multiple cases. @@ -18,8 +20,10 @@

Multi-Case Example Diagnostic: zonal-average near-surface temperature anomaly

Time averages, {{FIRSTYR}}-{{LASTYR}} +Time averages, {{startdate}}-{{enddate}} {{CASENAME}} OBS
Model Results
Zonal-mean near-surface temperature anomalies (K) - - plot
+ +plot --> diff --git a/diagnostics/example_multicase/example_multicase.py b/diagnostics/example_multicase/example_multicase.py index 1f9a733d3..b66ada428 100755 --- a/diagnostics/example_multicase/example_multicase.py +++ b/diagnostics/example_multicase/example_multicase.py @@ -34,11 +34,13 @@ # # Required programming language and libraries # -# * Python >= 3.7 +# * Python >= 3.10 # * xarray # * matplotlib +# * intake # * yaml # * sys +# * os # * numpy # # Required model output variables @@ -58,52 +60,46 @@ matplotlib.use("Agg") # non-X windows backend -import xarray as xr import matplotlib.pyplot as plt import numpy as np -import yaml +import intake import sys +import yaml # Part 1: Read in the model data # ------------------------------ - -# Receive a dictionary of case information from the framework. For now, we will -# "fake" a dictionary now with information we are getting from the single case -# POD that is processed by the framework +# Debugging: remove the following line in the final PR +# os.environ["WORK_DIR"] = "/Users/jess/mdtf/wkdir/MDTF_output/example_multicase" +work_dir = os.environ["WORK_DIR"] +# Receive a dictionary of case information from the framework print("reading case_info") +# Remove the following line in the final PR +# os.environ["case_env_file"] = os.path.join(work_dir, "case_info.yml") case_env_file = os.environ["case_env_file"] -assert(os.path.isfile(case_env_file)) +assert os.path.isfile(case_env_file), f"case environment file {case_env_file} not found" with open(case_env_file, 'r') as stream: try: case_info = yaml.safe_load(stream) - # print(parsed_yaml) except yaml.YAMLError as exc: print(exc) -# Sample case_info template ingested from yaml file ('case_info.yaml') -# case_info = { -# "CASENAME": { -# "NAME": os.environ["CASENAME"], -# "TAS_FILE": os.environ["TAS_FILE"], -# "tas_var": os.environ["tas_var"], -# "time_coord": os.environ["time_coord"], -# "lon_coord": os.environ["lon_coord"], -# }, -# "CASENAME1": { -# "NAME": os.environ["CASENAME"], -# "TAS_FILE": os.environ["TAS_FILE"], -# "tas_var": os.environ["tas_var"], -# "time_coord": os.environ["time_coord"], -# "lon_coord": os.environ["lon_coord"], -# }, -# } - -# Loop over cases and load datasets into a separate dict -model_datasets = dict() -for case_name, case_dict in case_info.items(): - ds = xr.open_dataset(case_dict["TAS_FILE"], use_cftime=True) - model_datasets[case_name] = ds - #print(ds) +cat_def_file = case_info['CATALOG_FILE'] +case_list = case_info['CASE_LIST'] +# all cases share variable names and dimension coords, so just get first result for each +tas_var = [case['tas_var'] for case in case_list.values()][0] +time_coord = [case['time_coord'] for case in case_list.values()][0] +lat_coord = [case['lat_coord'] for case in case_list.values()][0] +lon_coord = [case['lon_coord'] for case in case_list.values()][0] +# open the csv file using information provided by the catalog definition file +cat = intake.open_esm_datastore(cat_def_file) +# filter catalog by desired variable and output frequency +tas_subset = cat.search(variable_id=tas_var, frequency="day") +# examine assets for a specific file +#tas_subset['CMIP.synthetic.day.r1i1p1f1.day.gr.atmos.r1i1p1f1.1980-01-01-1984-12-31'].df +# convert tas_subset catalog to an xarray dataset dict +tas_dict = tas_subset.to_dataset_dict( + xarray_open_kwargs={"decode_times": True, "use_cftime": True} +) # Part 2: Do some calculations (time and zonal means) # --------------------------------------------------- @@ -111,11 +107,12 @@ tas_arrays = {} # Loop over cases +for k, v in
tas_dict.items(): + # load the tas data for case k + arr = tas_dict[k][tas_var] -for k, v in case_info.items(): # take the time mean - arr = model_datasets[k][case_info[k]["tas_var"]] - arr = arr.mean(dim=case_info[k]["time_coord"]) + arr = arr.mean(dim=tas_dict[k][time_coord].name) # this block shuffles the data to make this single case look more # interesting. ** DELETE THIS ** once we test with real data @@ -130,7 +127,7 @@ arr = arr - arr.mean() # take the zonal mean - arr = arr.mean(dim=case_info[k]["lon_coord"]) + arr = arr.mean(dim=tas_dict[k][lon_coord].name) tas_arrays[k] = arr @@ -153,18 +150,16 @@ plt.title("Zonal Mean Surface Air Temperature Anomaly") # save the plot in the right location -work_dir = os.environ["WK_DIR"] -assert os.path.isdir(f"{work_dir}/model/PS") -plt.savefig(f"{work_dir}/model/PS/example_model_plot.eps", bbox_inches="tight") - - -# Part 4: Clean up and close open file handles -# -------------------------------------------- - -_ = [x.close() for x in model_datasets.values()] +assert os.path.isdir(f"{work_dir}/model/PS"), f'Assertion error: {work_dir}/model/PS not found' +plt.savefig(f"{work_dir}/model/PS/example_multicase_plot.eps", bbox_inches="tight") -# Part 5: Confirm POD executed sucessfully +# Part 4: Close the catalog files and + # release variable dict reference for garbage collection +# ------------------------------------------------------ +cat.close() +tas_dict = None +# Part 5: Confirm POD executed successfully # ---------------------------------------- print("Last log message by example_multicase POD: finished successfully!") sys.exit(0) diff --git a/diagnostics/example_multicase/example_multirun_demo.ipynb b/diagnostics/example_multicase/example_multirun_demo.ipynb new file mode 100644 index 000000000..04493cd12 --- /dev/null +++ b/diagnostics/example_multicase/example_multirun_demo.ipynb @@ -0,0 +1,1424 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2c1fb53d-9b9f-41cc-b36c-1beeb791f2dd", + "metadata": {}, + "source": [ + "# MDTF Example Diagnostic POD for Multiple Cases / Experiments\n", + "### Uses: Data catalogs\n", + "### Disclaimer: This notebook runs in debug mode with some hardcoded values; MDTF integration is underway. It is essentially meant to show how things may flow from the user configuration files to the MDTF framework and into the POD, including figure generation. " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0c4fbe60-f63d-432a-9166-95cd312ba442", + "metadata": {}, + "outputs": [], + "source": [ + "# ================================================================================\n", + "#\n", + "# This file is part of the Multicase Example Diagnostic POD of the MDTF code\n", + "# package (see mdtf/MDTF-diagnostics/LICENSE.txt)\n", + "#\n", + "# Example Diagnostic POD\n", + "#\n", + "# Last update: March 2024 (exploring intake-esm catalog in the MDTF framework and the POD)\n", + "#\n", + "# This example builds upon the single case `example` POD\n", + "# and illustrates how to design and implement a POD that uses multiple\n", + "# model source datasets. 
These can be the same experiment with different\n", + "# models, two different experiments from the same model, or two different\n", + "# time periods within the same simulation.\n", + "#\n", + "# Version & Contact info\n", + "#\n", + "# - Version/revision information: version 1.1 (Oct-2022)\n", + "# - Model Development Task Force Framework Team\n", + "#\n", + "# Open source copyright agreement\n", + "#\n", + "# The MDTF framework is distributed under the LGPLv3 license (see LICENSE.txt).\n", + "#\n", + "# Functionality\n", + "#\n", + "# Metadata associated with the different cases are passed from the\n", + "# framework to the POD via a yaml file (case_info.yaml) that the POD reads into a dictionary.\n", + "# The POD iterates over the case entries in the dictionary and opens the input datasets.\n", + "# The `tas` variable is extracted for each case and the time average is taken over the dataset.\n", + "# Anomalies are calculated relative to the global mean and then zonally averaged. The resulting plot\n", + "# contains one line for each case.\n", + "#\n", + "# Required programming language and libraries\n", + "#\n", + "# * Python >= 3.10\n", + "# * xarray\n", + "# * matplotlib\n", + "# * intake\n", + "# * yaml\n", + "# * sys\n", + "# * os\n", + "# * numpy\n", + "#\n", + "# Required model output variables\n", + "#\n", + "# * tas - Surface (2-m) air temperature (CF: air_temperature)\n", + "#\n", + "# References\n", + "#\n", + "# Maloney, E. D., and Co-authors, 2019: Process-oriented evaluation of climate\n", + "# and weather forecasting models. BAMS, 100(9), 1665-1686,\n", + "# doi:10.1175/BAMS-D-18-0042.1." + ] + }, + { + "cell_type": "markdown", + "id": "4e6c0cc5-28bd-4fa9-9210-730842c65a44", + "metadata": {}, + "source": [ + "## Import necessary packages" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "964df112-b4a8-40c5-b7d5-e96f65ddf084", + "metadata": {}, + "outputs": [], + "source": [ + "# Import modules used in the POD\n", + "import os\n", + "import matplotlib\n", + "\n", + "matplotlib.use(\"Agg\") # non-X windows backend\n", + "\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline \n", + "import numpy as np\n", + "import intake\n", + "import sys\n", + "import yaml\n", + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "cell_type": "markdown", + "id": "48be4b51-1eb6-437d-8868-7b71cb418454", + "metadata": {}, + "source": [ + "NOTE: We are exploring, so there will be some hardcoded paths here, which the MDTF framework\n", + "will help us remove when the feature is available in the framework."
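For reference, the Functionality section above describes case_info.yaml only loosely. Below is a minimal sketch of the layout this POD actually consumes, inferred from the keys the code reads (CATALOG_FILE, CASE_LIST, and the per-case tas_var/time_coord/lat_coord/lon_coord entries); the case names and paths are hypothetical:

import yaml

# Hypothetical case_info.yml contents; only the keys this POD reads are shown.
case_info_yaml = """
CATALOG_FILE: /path/to/catalog_definition.json
CASE_LIST:
  CASENAME1:
    tas_var: tas
    time_coord: time
    lat_coord: lat
    lon_coord: lon
  CASENAME2:
    tas_var: tas
    time_coord: time
    lat_coord: lat
    lon_coord: lon
"""

case_info = yaml.safe_load(case_info_yaml)
print(case_info["CATALOG_FILE"])     # /path/to/catalog_definition.json
print(list(case_info["CASE_LIST"]))  # ['CASENAME1', 'CASENAME2']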
+ ] + }, + { + "cell_type": "markdown", + "id": "39a51b87-e19b-4bc5-9d9d-adcf8dc4c6eb", + "metadata": {}, + "source": [ + "## Part 1: Read in the case info (the YAML file that MDTF generates for the run)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "844aed9c-5407-492f-97ee-aaa7f995fb7e", + "metadata": {}, + "outputs": [], + "source": [ + "# Debugging: remove following line in final PR\n", + "os.environ[\"WORK_DIR\"] = \"/nbhome/a1r/wkdir/example_multicase\"\n", + "os.environ[\"case_env_file\"] = \"/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/case_info.yaml\"\n", + "os.environ['CATALOG_FILE'] = \"/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json\" \n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "cdd05af0-451b-440a-8551-a189d7d42bbe", + "metadata": {}, + "outputs": [], + "source": [ + "# Receive a dictionary of case information from the framework\n", + "case_env_file = os.environ[\"case_env_file\"]\n", + "assert os.path.isfile(case_env_file), f\"case environment file {case_env_file} not found\"\n", + "with open(case_env_file, 'r') as stream:\n", + " try:\n", + " case_info = yaml.safe_load(stream)\n", + " except yaml.YAMLError as exc:\n", + " print(exc)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bce6e34a-507a-45a9-ad5f-99a3a6b9e180", + "metadata": {}, + "outputs": [], + "source": [ + "cat_def_file = case_info['CATALOG_FILE']\n", + "case_list = case_info['CASE_LIST']\n", + "# all cases share variable names and dimension coords, so just get first result for each\n", + "tas_var = [case['tas_var'] for case in case_list.values()][0]\n", + "time_coord = [case['time_coord'] for case in case_list.values()][0]\n", + "lat_coord = [case['lat_coord'] for case in case_list.values()][0]\n", + "lon_coord = [case['lon_coord'] for case in case_list.values()][0]\n", + "# open the csv file using information provided by the catalog definition file\n" + ] + }, + { + "cell_type": "markdown", + "id": "8492bc06-f196-4037-9a42-52791ef170be", + "metadata": {}, + "source": [ + "## What is in the data catalog? " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "e24a27f2-8a92-4793-af49-b11193866aec", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

esm_catalog_ESM4 catalog with 89 dataset(s) from 785 asset(s):

[HTML summary table flattened in extraction; unique values per column: activity_id 1, institution_id 0, source_id 0, experiment_id 2, frequency 3, modeling_realm 1, table_id 0, member_id 0, grid_label 0, variable_id 64, temporal_subset 0, chunk_freq 0, grid_label.1 0, platform 0, dimensions 0, cell_methods 0, path 785, derived_variable_id 0]
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "cat = intake.open_esm_datastore(cat_def_file)\n", + "cat" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "a7145353-c0d2-46a4-8049-fa38aacd3ab8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
activity_idinstitution_idsource_idexperiment_idfrequencymodeling_realmtable_idmember_idgrid_labelvariable_idtemporal_subsetchunk_freqgrid_label.1platformdimensionscell_methodspath
0devNaNNaNc96L65_am5f3b1r0_pdclim1850F3hratmos_cmipNaNNaNNaNprNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
1devNaNNaNc96L65_am5f3b1r0_pdclim1850F3hratmos_cmipNaNNaNNaNrlutNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
2devNaNNaNc96L65_am5f3b1r0_pdclim1850F3hratmos_cmipNaNNaNNaNprNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
3devNaNNaNc96L65_am5f3b1r0_pdclim1850F3hratmos_cmipNaNNaNNaNrlutNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
4devNaNNaNc96L65_am5f3b1r0_pdclim1850F3hratmos_cmipNaNNaNNaNprNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
......................................................
780devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNzg500NaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
781devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNhursNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
782devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNhussNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
783devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNprNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
784devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNpslNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
\n", + "

785 rows × 17 columns

\n", + "
" + ], + "text/plain": [ + " activity_id institution_id source_id experiment_id \\\n", + "0 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "1 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "2 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "3 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "4 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + ".. ... ... ... ... \n", + "780 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "781 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "782 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "783 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "784 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "\n", + " frequency modeling_realm table_id member_id grid_label variable_id \\\n", + "0 3hr atmos_cmip NaN NaN NaN pr \n", + "1 3hr atmos_cmip NaN NaN NaN rlut \n", + "2 3hr atmos_cmip NaN NaN NaN pr \n", + "3 3hr atmos_cmip NaN NaN NaN rlut \n", + "4 3hr atmos_cmip NaN NaN NaN pr \n", + ".. ... ... ... ... ... ... \n", + "780 daily atmos_cmip NaN NaN NaN zg500 \n", + "781 daily atmos_cmip NaN NaN NaN hurs \n", + "782 daily atmos_cmip NaN NaN NaN huss \n", + "783 daily atmos_cmip NaN NaN NaN pr \n", + "784 daily atmos_cmip NaN NaN NaN psl \n", + "\n", + " temporal_subset chunk_freq grid_label.1 platform dimensions \\\n", + "0 NaN NaN NaN NaN NaN \n", + "1 NaN NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN NaN \n", + "4 NaN NaN NaN NaN NaN \n", + ".. ... ... ... ... ... \n", + "780 NaN NaN NaN NaN NaN \n", + "781 NaN NaN NaN NaN NaN \n", + "782 NaN NaN NaN NaN NaN \n", + "783 NaN NaN NaN NaN NaN \n", + "784 NaN NaN NaN NaN NaN \n", + "\n", + " cell_methods path \n", + "0 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "1 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "2 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "3 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "4 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + ".. ... ... \n", + "780 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "781 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "782 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "783 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "784 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "\n", + "[785 rows x 17 columns]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cat.df" + ] + }, + { + "cell_type": "markdown", + "id": "79536816-c505-4b2f-8666-68fcca991af1", + "metadata": {}, + "source": [ + "## Searching for TAS DAILY output for my POD" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "cd754709-3dc7-4a4c-90c0-67a2f5672021", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

esm_catalog_ESM4 catalog with 2 dataset(s) from 13 asset(s):

[HTML summary table flattened in extraction; unique values per column: activity_id 1, institution_id 0, source_id 0, experiment_id 2, frequency 1, modeling_realm 1, table_id 0, member_id 0, grid_label 0, variable_id 1, temporal_subset 0, chunk_freq 0, grid_label.1 0, platform 0, dimensions 0, cell_methods 0, path 13, derived_variable_id 0]
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# filter catalog by desired variable and output frequency\n", + "tas_subset = cat.search(variable_id=tas_var, frequency=\"daily\")\n", + "tas_subset " + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "6ceae945-1156-4715-ba12-42c1994d81e9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
activity_idinstitution_idsource_idexperiment_idfrequencymodeling_realmtable_idmember_idgrid_labelvariable_idtemporal_subsetchunk_freqgrid_label.1platformdimensionscell_methodspath
0devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
1devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
2devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
3devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
4devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
5devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
6devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
7devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
8devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
9devNaNNaNc96L65_am5f3b1r0_pdclim1850Fdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd...
10devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
11devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
12devNaNNaNc384L65_am5f3b1r0_amipdailyatmos_cmipNaNNaNNaNtasNaNNaNNaNNaNNaNNaN/archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a...
\n", + "
" + ], + "text/plain": [ + " activity_id institution_id source_id experiment_id \\\n", + "0 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "1 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "2 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "3 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "4 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "5 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "6 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "7 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "8 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "9 dev NaN NaN c96L65_am5f3b1r0_pdclim1850F \n", + "10 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "11 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "12 dev NaN NaN c384L65_am5f3b1r0_amip \n", + "\n", + " frequency modeling_realm table_id member_id grid_label variable_id \\\n", + "0 daily atmos_cmip NaN NaN NaN tas \n", + "1 daily atmos_cmip NaN NaN NaN tas \n", + "2 daily atmos_cmip NaN NaN NaN tas \n", + "3 daily atmos_cmip NaN NaN NaN tas \n", + "4 daily atmos_cmip NaN NaN NaN tas \n", + "5 daily atmos_cmip NaN NaN NaN tas \n", + "6 daily atmos_cmip NaN NaN NaN tas \n", + "7 daily atmos_cmip NaN NaN NaN tas \n", + "8 daily atmos_cmip NaN NaN NaN tas \n", + "9 daily atmos_cmip NaN NaN NaN tas \n", + "10 daily atmos_cmip NaN NaN NaN tas \n", + "11 daily atmos_cmip NaN NaN NaN tas \n", + "12 daily atmos_cmip NaN NaN NaN tas \n", + "\n", + " temporal_subset chunk_freq grid_label.1 platform dimensions \\\n", + "0 NaN NaN NaN NaN NaN \n", + "1 NaN NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN NaN \n", + "4 NaN NaN NaN NaN NaN \n", + "5 NaN NaN NaN NaN NaN \n", + "6 NaN NaN NaN NaN NaN \n", + "7 NaN NaN NaN NaN NaN \n", + "8 NaN NaN NaN NaN NaN \n", + "9 NaN NaN NaN NaN NaN \n", + "10 NaN NaN NaN NaN NaN \n", + "11 NaN NaN NaN NaN NaN \n", + "12 NaN NaN NaN NaN NaN \n", + "\n", + " cell_methods path \n", + "0 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "1 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "2 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "3 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "4 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "5 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "6 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "7 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "8 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "9 NaN /archive/am5/am5/am5f3b1r0/c96L65_am5f3b1r0_pd... \n", + "10 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "11 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... \n", + "12 NaN /archive/am5/am5/am5f3b1r0/c384L65_am5f3b1r0_a... " + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tas_subset.df" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "0049f883-2870-4e45-bb98-75c200b0ed72", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--> The keys in the returned dictionary of datasets are constructed as follows:\n", + "\t'experiment_id.frequency.modeling_realm.variable_id'\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " 100.00% [2/2 04:06<00:00]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# convert tas_subset catalog to an xarray dataset dict\n", + "tas_dict = tas_subset.to_dataset_dict(\n", + " xarray_open_kwargs={\"decode_times\": True, \"use_cftime\": True}\n", + ")\n", + "# get key list for new dictionary. Each key corresponds to a case\n", + "#tas_keys = [key for key in tas_dict.keys()]\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "54180da2-5da5-48e6-8b2a-7ec07d463b06", + "metadata": {}, + "source": [ + "## Let us do some calculations" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "c7d82e9c-6d6d-436e-968f-c1dcf7ebe19c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "case: c96L65_am5f3b1r0_pdclim1850F.daily.atmos_cmip.tas\n", + "case: c384L65_am5f3b1r0_amip.daily.atmos_cmip.tas\n" + ] + } + ], + "source": [ + "# Part 2: Do some calculations (time and zonal means)\n", + "# ---------------------------------------------------\n", + "\n", + "tas_arrays = {}\n", + "\n", + "# Loop over cases\n", + "for k, v in tas_dict.items():\n", + " # load the tas data for case k\n", + " print(\"case:\",k)\n", + " arr = tas_dict[k][tas_var]\n", + "\n", + " # take the time mean\n", + " arr = arr.mean(dim=tas_dict[k][time_coord].name)\n", + "\n", + " # this block shuffles the data to make this single case look more\n", + " # interesting. ** DELETE THIS ** once we test with real data\n", + "\n", + " arr.load()\n", + " values = arr.to_masked_array().flatten()\n", + " np.random.shuffle(values)\n", + " values = values.reshape(arr.shape)\n", + " arr.values = values\n", + "\n", + " # convert to anomalies\n", + " arr = arr - arr.mean()\n", + "\n", + " # take the zonal mean\n", + " arr = arr.mean(dim=tas_dict[k][lon_coord].name)\n", + "\n", + " tas_arrays[k] = arr\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "f548d1c4-0bf6-4312-bc45-55e650cebb23", + "metadata": {}, + "source": [ + "### We are comparing the above cases\n" + ] + }, + { + "cell_type": "markdown", + "id": "c0b225b0-1d40-4d64-b93c-a00081c4c8b1", + "metadata": {}, + "source": [ + "## Let us PLOT" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "eb5dcce3-bd63-469b-b919-7313a094e4fb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Let's plot!\n", + "--------------------------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "Text(0.5, 1.0, 'Zonal Mean Surface Air Temperature Anomaly')" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA+cAAAGJCAYAAADon0K/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOydd5gT1dfHv+nZvktZWDrSQUBeVKQookgRwYLSflJEBQRUUBBREbDQlKZgV0ClKYIF6VWltxWkl116W7bvps99/0hmcmcy6ckmy97P8+wDSSYzNzN37txzz/ecoyCEEDAYDAaDwWAwGAwGg8GIGMpIN4DBYDAYDAaDwWAwGIyyDjPOGQwGg8FgMBgMBoPBiDDMOGcwGAwGg8FgMBgMBiPCMOOcwWAwGAwGg8FgMBiMCMOMcwaDwWAwGAwGg8FgMCIMM84ZDAaDwWAwGAwGg8GIMMw4ZzAYDAaDwWAwGAwGI8Iw45zBYDAYDAaDwWAwGIwIw4xzBoPBYDAYDAaDwWAwIgwzzhkMBuM2Zdu2bVAoFNi2bVukm3LbUlhYiBdeeAGVK1eGQqHAqFGjIt2koFAoFJg0aVKkm8FghA02LjIYjGiGGecMBoPhJwqFwutfaTJwFi5cKLT7n3/+cfmcEILq1atDoVDgsccei0ALfcdsNmPu3Llo0aIFEhMTkZycjCZNmmDIkCE4ceJEyI83ZcoULFy4EC+99BJ++OEH9O/fP+THCBWfffYZFAoFWrVqFZb916pVy6d7Y+HChWE5fmlgyZIlmDNnTqSb4ZVevXpBoVBg3LhxkW4Kg8FglCnUkW4Ag8FglDZ++OEHt59NmjQJZ8+eDZsBFE70ej2WLFmCdu3aid7fvn07Ll26BJ1OF6GW+U7Pnj2xdu1a9O3bFy+++CIsFgtOnDiB1atXo02bNmjYsGFIj7dlyxbcd999mDhxYkj3Gw4WL16MWrVqYe/evThz5gzq1q3rso3BYIBaHdjUYM6cOSgsLBRer1mzBkuXLsXs2bNRoUIF4f02bdoEtP/bgSVLluC///6LaoVFfn4+/vjjD9SqVQtLly7FtGnToFAoIt0sBoPBKBMw45zBYDD85Nlnn5V9/5tvvsHZs2fx8ssvo2vXriXcquB59NFH8fPPP+OTTz4RGWhLlixBy5YtkZWVFcHWeWffvn1YvXo1PvzwQ7z11luiz+bNm4fc3NyQHIfjOJjNZuj1ety4cQONGzcOyX7DSUZGBnbu3ImVK1di6NChWLx4seyCgl6v97qvoqIixMXFubz/xBNPiF5fu3YNS5cuxRNPPIFatWoF2vSopri4GLGxsZFuRkjb8csvv8Bms+G7777DQw89hL/++gvt27cPyb4ZDAaD4Rkma2cwGIwQcPToUbzyyito0aIFPvroI9FnRUVFeP3111G9enXodDo0aNAAH3/8MQghou0UCgVGjhyJX3/9FXfeeSd0Oh2aNGmCdevWibY7f/48hg8fjgYNGiAmJgbly5fHM888g8zMzKB+Q9++fXHr1i1s3LhReM9sNmPFihXo16+f7Hc4jsOcOXPQpEkT6PV6VKpUCUOHDkVOTo5ou99++w3dunVDlSpVoNPpUKdOHbz//vuw2Wyi7R588EHceeedOHbsGDp06IDY2FhUrVoVM2bM8Nr+s2fPAgDatm3r8plKpUL58uWF14MGDZI1GCdNmuTiJeSvy+LFi9GkSRPodDqsW7cOCoUCGRkZ+PPPPwXJdmZmJsxmM9599120bNkSSUlJiIuLw/3334+tW7fKnr+5c+eiadOm0Ov1qFixIrp06YL9+/eLtvvxxx/RsmVLxMTEoFy5cujTpw8uXrzo9ZzwLF68GCkpKejWrRuefvppLF68WHY7aUgGfz6OHTuGfv36ISUlxUVZ4S++/Ba+Hxw+fBjt27dHbGws6tatixUrVgCwqzlatWqFmJgYNGjQAJs2bRJ9n2/3iRMn0KtXLyQmJqJ8+fJ49dVXYTQag2rTgQMH8MADDyA2NlZYBPKlfz/44IP4888/cf78eaG/8H2QDy2R3sNy8dGe2mEymTBx4kTUrVsXOp0O1atXxxtvvAGTyeTz9Vm8eDEeeeQRdOjQAY0aNZLtK3x7d+zYgddeew0VK1ZEXFwcnnzySdy8edNl+88++0y4d6pUqYIRI0a4LJYFe80DHRcnTpwIjUYj2+4hQ4YgOTlZts8wGAxGOGDGOYPBYARJcXExevXqBZVKhWXLlonk34QQ9OjRA7Nnz0aXLl0wa9YsNGjQAGPHjsVrr73msq9//vkHw4cPR58+fTBjxgwYjUb07NkTt27dErbZt28fdu7ciT59+uCTTz7BsGHDsHnzZjz44IMoLi4O+HfUqlULrVu3xtKlS4X31q5di7y8PPTp00f2O0OHDsXYsWPRtm1bzJ07F8899xwWL16Mzp07w2KxCNstXLgQ8fHxeO211zB37ly0bNkS7777Lt58802Xfebk5KBLly5o3rw5Zs6ciYYNG2LcuHFYu3atx/bXrFkTgN24sFqtgZwCt2zZsgWjR49G7969MXfuXKSlpeGHH35AhQoVcNddd+GHH37ADz/8gIoVKyI/Px/ffPMNHnzwQUyfPh2TJk3CzZs30blzZ6Snp4v2+/zzz2PUqFGoXr06pk+fjjfffBN6vR67d+8Wtvnwww8xYMAA1KtXD7NmzcKoUaOwefNmPPDAAz6rARYvXoynnnoKWq0Wffv2xenTp7Fv3z6ff/8zzzyD4uJiTJkyBS+++KLP35Piz2/JycnBY489hlatWmHGjBnQ6XTo06cPli9fjj59+uDRRx/FtGnTUFRUhKeffhoFBQUux+vVqxeMRiOmTp2KRx99FJ988gmGDBkScJtu3bqFrl274q677sKcOXPQoUMHAL7177fffht33XUXKlSoIPSXQOPP5drBcRx69OiBjz/+GN27d8enn36KJ554ArNnz0bv3r192u+VK1ewdetW9O3bF4B9wW7FihUwm82y27/88sv4999/MXHiRLz00kv4448/MHLkSNE2kyZNwogRI1ClShXMnDkTPXv2xJdffolOnTqJxggguGse6LjYv39/WK1WLF++XPQ+vzDZs2dPnxQlDAaDERIIg8FgMIJi8ODBBABZtGiRy2e//vorAUA++OAD0ftPP/00USgU5MyZM8J7AIhWqxW99++//xIA5NNPPxXeKy4udjnOrl27CADy/fffC+9t3bqVACBbt2712P4FCxYQAGTfvn1k3rx5JCEhQTjGM888Qzp06EAIIaRmzZqkW7duwvf+/vtvAoAsXrxYtL9169a5vC/X5qFDh5LY2FhiNBqF99q3b+/yO0wmE6lcuTLp2bOnx9/BcZzw/UqVKpG+ffuS+fPnk/Pnz7tsO3DgQFKzZk2X9ydOnEikj0YARKlUkqNHj7psLz0nhBBitVqJyWQSvZeTk0MqVapEBg8eLLy3ZcsWAoC88sorsr+FEEIyMzOJSqUiH374oejzI0eOELVa7fK+HPv37ycAyMaNG4V9V6tWjbz66qsu2wIgEydOFF7z56Nv37
5ejyPlo48+IgBIRkaG37+Fv45LliwR3jtx4oRwLXbv3i28v379egKALFiwwKXdPXr0EB1r+PDhBAD5999/A27TF1984fJbfe3f3bp1k+13/D3InyseuXvYXTt++OEHolQqyd9//y16/4svviAAyI4dO1yOK+Xjjz8mMTExJD8/nxBCyKlTpwgAsmrVKtn2duzYUeirhBAyevRoolKpSG5uLiGEkBs3bhCtVks6depEbDabsN28efMIAPLdd9+5/K5Ar3kw42Lr1q1Jq1atRN9duXKlT+Mng8FghBLmOWcwGIwgWLJkCb777jv0798fAwYMcPl8zZo1UKlUeOWVV0Tvv/766yCEuHiDO3bsiDp16givmzVrhsTERJw7d054LyYmRvi/xWLBrVu3ULduXSQnJ+PgwYNB/Z5evXrBYDBg9erVKCgowOrVq91K2n/++WckJSXhkUceQVZWlvDXsmVLxMfHi2TcdJsLCgqQlZWF+++/H8XFxS5Z1OPj40Vx/VqtFvfee6/oHMihUCiwfv16fPDBB0hJScHSpUsxYsQI1KxZE7179w4q5rx9+/Y+x5arVCpotVoAdtl6dnY2rFYr7r77btH1+eWXX6BQKGRjv3lp/cqVK8FxHHr16iU6x5UrV0a9evVkpfJSFi9ejEqVKgleXoVCgd69e2PZsmUuYQXuGDZsmE/becLf3xIfHy9SbDRo0ADJyclo1KiRKOEi/3+5/jFixAjR65dffhmA/b4MpE06nQ7PPfecy3H86d+hQK4dP//8Mxo1aoSGDRuKfstDDz0EAD73lW7duiEhIQEAUK9ePbRs2dJtGMSQIUNEYSD3338/bDYbzp8/DwDYtGkTzGYzRo0aBaXSOeV88cUXkZiYiD///FO0v2CueTDj4oABA7Bnzx4hNIY/F9WrV2fx9gwGo0RhCeEYDAYjQE6fPo1hw4ahfv36+Oyzz2S3OX/+PKpUqSJMdnkaNWokfE5To0YNl32kpKSIYrgNBgOmTp2KBQsW4PLly6LY9by8vIB/DwBUrFgRHTt2xJIlS1BcXAybzYann35adtvTp08jLy8Pqampsp/fuHFD+P/Ro0fxzjvvYMuWLcjPzxdtJ21ztWrVXOK+U1JScPjwYa/t1+l0ePvtt/H222/j6tWr2L59O+bOnYuffvoJGo0GP/74o9d9yFG7dm2/tl+0aBFmzpyJEydOiKS79H7Onj2LKlWqoFy5cm73c/r0aRBCUK9ePdnPNRqNx3bYbDYsW7YMHTp0QEZGhvB+q1atMHPmTGzevBmdOnXy+nv8/f1y+Ptb5PpBUlISqlev7vIeAJc8BwBcjlWnTh0olUohDtnfNlWtWlVYeKHxp3+HArl2nD59GsePH0fFihVlv0Pfj3IcP34chw4dwoABA3DmzBnh/QcffBDz589Hfn4+EhMTRd+RjlcpKSkAnNeCH98aNGgg2k6r1eKOO+5wGf+CuebBjIu9e/fGqFGjsHjxYrz77rvIy8vD6tWrMXr0aJapnsFglCjMOGcwGIwAMJlM6N27N8xmM5YtW4b4+PiQ7FelUsm+T080X375ZSxYsACjRo1C69atkZSUBIVCgT59+oDjuKDb0K9fP7z44ou4du0aunbtiuTkZNntOI5DamqqW68abyTk5uaiffv2SExMxHvvvYc6depAr9fj4MGDGDdunEubfTkHvpCWloY+ffqgZ8+eaNKkCX766ScsXLgQarXa7YTbnSeZ9sp548cff8SgQYPwxBNPYOzYsUhNTYVKpcLUqVNFnjlf4DgOCoUCa9eulT0v3vrdli1bcPXqVSxbtgzLli1z+Xzx4sU+Gef+/H53+Ptb3PWDYPqH9Lr72ya58+Bv//alXTz+9EeO49C0aVPMmjVL9jtSA1cKv3A1evRojB492uXzX375xcVbH6p71dv+wj0upqSk4LHHHhOM8xUrVsBkMrmtzMFgMBjhghnnDAaDEQBjxozBoUOHMHfuXLRo0cLtdjVr1sSmTZtQUFAg8p7zUlc+iZk/rFixAgMHDsTMmTOF94xGY8hKhT355JMYOnQodu/e7ZIkiaZOnTrYtGkT2rZt69F427ZtG27duoWVK1figQceEN6nPbnhRKPRoFmzZjh9+rQgWU5JSZE9X1JPXiCsWLECd9xxB1auXCkyuqTy9Tp16mD9+vXIzs526z2vU6cOCCGoXbs26tev73dbFi9ejNTUVMyfP9/ls5UrV2LVqlX44osvQmJ8eyPY3xIIp0+fFnn9z5w5A47jhCzpoWiTP/3bnRHOe5ylfdKf/linTh38+++/ePjhh/329hJCsGTJEnTo0AHDhw93+fz999/H4sWLZSX9nuDHt5MnT+KOO+4Q3jebzcjIyEDHjh392p8ngh0XBwwYgMcffxz79u3D4sWL0aJFCzRp0iRk7WMwGAxfYDHnDAaD4SerVq3CvHnz0KNHD5dYcimPPvoobDYb5s2bJ3p/9uzZUCgUAdVDV6lULp6pTz/91Of4YW/Ex8fj888/x6RJk9C9e3e32/Xq1Qs2mw3vv/++y2dWq1WYFPNeL7rNZrPZbShAoJw+fRoXLlxweT83Nxe7du1CSkqK4M2vU6cO8vLyRFL5q1evYtWqVUG3Q+737tmzB7t27RJt17NnTxBCMHnyZJd98N996qmnoFKpMHnyZJdrTggRZfGXYjAYsHLlSjz22GN4+umnXf5GjhyJgoIC/P777wH/Vn8I5rcEinRR4tNPPwUA4b4LRZv86d9xcXGyEms+z8Rff/0lvGez2fDVV195PT5Pr169cPnyZXz99dcunxkMBhQVFbn97o4dO5CZmYnnnntOtq/07t0bW7duxZUrV3xuD2DPoaHVavHJJ5+Izs+3336LvLw8dOvWza/9eSLYcbFr166oUKECpk+fju3btzOvOYPBiAjMc85gMBh+cPXqVTz//PNQqVR4+OGH3cYw16lTB61bt0b37t3RoUMHvP3228jMzETz5s2xYcMG/Pbbbxg1apQo+ZuvPPbYY/jhhx+QlJSExo0bY9euXdi0aZOojnewDBw40Os27du3x9ChQzF16lSkp6ejU6dO0Gg0OH36NH7++WfMnTsXTz/9NNq0aYOUlBQMHDgQr7zyChQKBX744YeApa/u+Pfff9GvXz907doV999/P8qVK4fLly9j0aJFuHLlCubMmSMYUn369MG4cePw5JNP4pVXXkFxcTE+//xz1K9fP+ikeo899hhWrlyJJ598Et26dUNGRga++OILNG7cGIWFhcJ2HTp0QP/+/fHJJ5/g9OnT6NKlCziOw99//40OHTpg5MiRqFOnDj744AOMHz8emZmZeOKJJ5CQkICMjAysWrUKQ4YMwZgxY2Tb8fvvv6OgoAA9evSQ/fy+++5DxYoVsXjxYp9LbQVDML8lUDIyMtCjRw906dIFu3btwo8//oh+/fqhefPmIWuTP/27ZcuWWL58OV577TXcc889iI+PR/fu3dGkSRPcd999GD9+vKCkWLZsmV8lAfv374+ffvoJw4YNw9atW9G2bVvYbDacOHECP/30E9avX
4+7775b9ruLFy+GSqVyayz36NEDb7/9NpYtWyZbAtIdFStWxPjx4zF58mR06dIFPXr0wMmTJ/HZZ5/hnnvuCakBHOy4qNFo0KdPH8ybNw8qlUooJ8dgMBglSglkhGcwGIzbBr4Mj7e/gQMHCt8pKCggo0ePJlWqVCEajYbUq1ePfPTRR6ISRITYy1iNGDHC5Zg1a9YU7S8nJ4c899xzpEKFCiQ+Pp507tyZnDhxwmW7QEqpeUKubBghhHz11VekZcuWJCYmhiQkJJCmTZuSN954g1y5ckXYZseOHeS+++4jMTExpEqVKuSNN94QyiFJy0Q1adLE5RjuSp/RXL9+nUybNo20b9+epKWlEbVaTVJSUshDDz1EVqxY4bL9hg0byJ133km0Wi1p0KAB+fHHH92WUpO7Lu7OCcdxZMqUKaRmzZpEp9ORFi1akNWrV8v+BqvVSj766CPSsGFDotVqScWKFUnXrl3JgQMHRNv98ssvpF27diQuLo7ExcWRhg0bkhEjRpCTJ0+6PR/du3cner2eFBUVud1m0KBBRKPRkKysLOG3ypVSu3nzptt9uENaSs2f3+KuH7jrg9JrxLf72LFj5OmnnyYJCQkkJSWFjBw5khgMBpfvB9MmQnzv34WFhaRfv34kOTmZABD1h7Nnz5KOHTsSnU5HKlWqRN566y2yceNGn+8RQggxm81k+vTppEmTJkSn05GUlBTSsmVLMnnyZJKXl+f2O+XLlyf333+/7Oc8tWvXJi1atCCEuB8z3I058+bNIw0bNiQajYZUqlSJvPTSSyQnJ0e0TbDXPBTj4t69ewkA0qlTJ4/ngsFgMMKFgpAQuy4YDAaDwWAwIsikSZMwefJk3Lx5ExUqVIh0cxilhH///Rd33XUXvv/+e/Tv3z/SzWEwGGUQFnPOYDAYDAaDwSjzfP3114iPj8dTTz0V6aYwGIwyCos5ZzAYDAaDwWCUWf744w8cO3YMX331FUaOHIm4uLhIN4nBYJRRmHHOYDAYDAaDwSizvPzyy7h+/ToeffRR2eoJDAaDUVKwmHMGg8FgMBgMBoPBYDAiDIs5ZzAYDAaDwWAwGAwGI8Iw45zBYDAYDAaDwWAwGIwIU6ZizjmOw5UrV5CQkACFQhHp5jAYDAaDwWAwGAwG4zaHEIKCggJUqVIFSqV7/3iZMs6vXLmC6tWrR7oZDAaDwWAwGAwGg8EoY1y8eBHVqlVz+3mZMs4TEhIA2E9KYmJihFvDYDAYDAaDwWAwGIzbnfz8fFSvXl2wR91RpoxzXsqemJjIjHMGg8FgMBgMBoPBYJQY3kKrWUI4BoPBYDAYDAaDwWAwIgwzzhkMBoPBYDAYDAaDwYgwzDhnMBgMBoPBYDAYDAYjwpSpmHMGg8FgMBilD5vNBovFEulmMBgMBoMhi0qlglqtDrpcNzPOGQwGg8FgRC2FhYW4dOkSCCGRbgqDwWAwGG6JjY1FWloatFptwPtgxjmDwWAwGIyoxGaz4dKlS4iNjUXFihWD9kgwGAwGgxFqCCEwm824efMmMjIyUK9ePSiVgUWPM+OcwWAwGAxGVGKxWEAIQcWKFRETExPp5jAYDAaDIUtMTAw0Gg3Onz8Ps9kMvV4f0H5YQjgGg8FgMBhRDfOYMxgMBiPaCdRbLtpHCNpRInz++edo1qwZEhMTkZiYiNatW2Pt2rWRbhaDwWAwGAwGg8FgMBhBU2qM82rVqmHatGk4cOAA9u/fj4ceegiPP/44jh49GummMRgMBiOEmK0cfku/jOv5xkg3hcFgMBgMBqPEKDXGeffu3fHoo4+iXr16qF+/Pj788EPEx8dj9+7dbr9jMpmQn58v+mMwGAxGdLPx2HW8uiwd09aeiHRTGAxGlHLixAncd9990Ov1uOuuu3z6zsKFC5GcnBzWdpU027Ztg0KhQG5urk/bP/jggxg1apTwulatWpgzZ05Y2lbayMzMhEKhQHp6us/fGTRoEJ544gnhtfT8MkJDWeqnpcY4p7HZbFi2bBmKiorQunVrt9tNnToVSUlJwl/16tVLsJW3B2dvFsJosUW6GQwGowyRVWgCAFzOMUS4JQxGeDh48CAeeeQRJCcno3z58hgyZAgKCwtdtlu4cCGaNWsGvV6P1NRUjBgxQvjMF6MsNzcXI0aMQFpaGnQ6HerXr481a9YIn0+aNAkKhUL017Bhw5D+1kAYNGiQS7u6dOki2mbixImIi4vDyZMnsXnzZgBAjx49UKNGDej1eqSlpaF///64cuWKX8e+evUq+vXrh/r160OpVPplaBFC8O677yItLQ0xMTHo2LEjTp8+7dfxS5p9+/ZhyJAhQe3D13M2Z84cNGjQADExMahevTpGjx4No9GpkPKlPxqNRowYMQLly5dHfHw8evbsievXrwuf8wa29O/ZZ58N6je6Y+7cuVi4cGFY9i1FuhBQlghFPy0tCyelyjg/cuQI4uPjodPpMGzYMKxatQqNGzd2u/348eORl5cn/F28eLEEW1v6OXYlHw/P3I7Xf/430k1hMBhlCLOVAwDkGswRbgmDEXquXLmCjh07om7dutizZw/WrVuHo0ePYtCgQaLtZs2ahbfffhtvvvkmjh49ik2bNqFz584+H8dsNuORRx5BZmYmVqxYgZMnT+Lrr79G1apVRds1adIEV69eFf7++eefUPzMoOnSpYuoXUuXLhV9fvbsWbRr1w41a9ZE+fLlAQAdOnTATz/9hJMnT+KXX37B2bNn8fTTT/t1XJPJhIoVK+Kdd95B8+bNffqO2Wwfq2bMmIFPPvkEX3zxBfbs2YO4uDh07txZZIBGGxUrVkRsbGxQ+/DlnC1ZsgRvvvkmJk6ciOPHj+Pbb7/F8uXL8dZbb4m289YfR48ejT/++AM///wztm/fjitXruCpp55yOd6mTZtE+5k/f35Qv9EdSUlJt50aIxoJRT8tNZBShMlkIqdPnyb79+8nb775JqlQoQI5evSoz9/Py8sjAEheXl4YW3n7sPbIFVJz3GrS7ZO/It0UBoNRhpi35TSpOW41uffDjZFuCiPCGAwGcuzYMWIwGAghhHAcR4pMloj8cRznV9ttNhuZPn06qVOnDtFqtaR69erkgw8+IF9++SVJTU0lNptN2Pbw4cMEADl9+jQhhJDs7GwSExNDNm3a5Hb/W7duJQBITk6O7Oeff/45ueOOO4jZbHa7j4kTJ5LmzZv79btoZs6cSe68804SGxtLqlWrRl566SVSUFAgfL5gwQKSlJRE/vjjD1K/fn0SExNDevbsSYqKisjChQtJzZo1SXJyMnn55ZeJ1WoVvjdw4EDy+OOPuz0uANHfxIkTZbf77bffiEKhEM4B355Vq1aRunXrEp1ORzp16kQuXLgg+/327duTV1991eV9vn0ffPABSUtLI7Vq1SIcx5HKlSuTjz76SNguNzeX6HQ6snTpUg9n0U5GRgYBQJYuXUpat25NdDodadKkCdm2bZtouz///JPUq1eP6PV68uCDD5IFCxa49IN//vmHtG/fnsTExJDk5GTSqVMnkp2dLfubatasSWbPni28BkC+
+OIL0q1bNxITE0MaNmxIdu7cSU6fPk3at29PYmNjSevWrcmZM2f8OmcjRowgDz30kOi91157jbRt21Z47a0/5ubmEo1GQ37++WfhvePHjxMAZNeuXaLzeOjQIbf74dmzZw+56667iE6nIy1btiQrV64UfddqtZLBgweTWrVqEb1eT+rXr0/mzJkj2oe0r9K/f/LkyaRJkyYux23evDl55513RO95O9bEiRNd+v3WrVuF37t8+XLSrl07otfryd13301OnjxJ9u7dS1q2bEni4uJIly5dyI0bN4T92Ww2MnnyZFK1alWi1WpJ8+bNydq1a4XPTSYTGTFiBKlcuTLR6XSkRo0aZMqUKV7PKSGE5OTkkCFDhpDU1FShH//xxx+EkMDHBLl++tlnn5EuXboQvV5PateuLeoXUgYOHOhy/jIyMny6xlu3biX33HMPiY2NJUlJSaRNmzYkMzNT9jjSZxaNr3ZoqapzrtVqUbduXQBAy5YtsW/fPsydOxdffvllhFt2e2K2EQCAxUoi3BIGg1GWMPGe82JLhFvCiDYMFhsav7s+Isc+9l5nxGp9nzaNHz8eX3/9NWbPno127drh6tWrOHHiBAwGA7RarajkDl/D/Z9//kHdunWxceNGcByHy5cvo1GjRigoKECbNm0wc+ZMn0P0fv/9d7Ru3RojRozAb7/9hooVK6Jfv34YN24cVCqVsN3p06dRpUoV6PV6tG7dGlOnTkWNGjV8OoZSqcQnn3yC2rVr49y5cxg+fDjeeOMNfPbZZ8I2xcXF+OSTT7Bs2TIUFBTgqaeewpNPPonk5GSsWbMG586dQ8+ePdG2bVv07t1b+N62bduQmpqKlJQUPPTQQ/jggw8ED/nVq1fRsWNHdOnSBWPGjEF8fLxL27Kzs7F48WK0adMGGo1G1J4PP/wQ33//PbRaLYYPH44+ffpgx44dPv1mns2bNyMxMREbN24EAGRkZODatWvo2LGjsE1SUhJatWqFXbt2oU+fPj7td+zYsZgzZw4aN26MWbNmoXv37sjIyED58uVx8eJFPPXUUxgxYgSGDBmC/fv34/XXXxd9Pz09HQ8//DAGDx6MuXPnQq1WY+vWrbDZfA9RfP/99zFr1izMmjUL48aNQ79+/XDHHXdg/PjxqFGjBgYPHoyRI0f6VTWpTZs2+PHHH7F3717ce++9OHfuHNasWYP+/fuLtvPUHw8cOACLxSI6xw0bNkSNGjWwa9cu3HfffT63p7CwEI899hgeeeQR/Pjjj8jIyMCrr74q2objOFSrVg0///wzypcvj507d2LIkCFIS0tDr169vB5j8ODBmDx5Mvbt24d77rkHAHDo0CEcPnwYK1eu9OtYY8aMwfHjx5Gfn48FCxYAAMqVKyeEbUycOBFz5swRrk+/fv2QkJCAuXPnIjY2Fr169cK7776Lzz//HIBdjj9z5kx8+eWXaNGiBb777jv06NEDR48eRb169fDJJ5/g999/x08//YQaNWrg4sWLPimQOY5D165dUVBQgB9//BF16tTBsWPHRGNOoGOClAkTJmDatGmYO3cufvjhB/Tp0wdHjhxBo0aNXLadO3cuTp06hTvvvBPvvfceALs33tt5t1qteOKJJ/Diiy9i6dKlMJvN2Lt3b1jLe5Yq41wKx3EwmUyRbsZti9VmnyCbHf8yGAxGScDL2k1WDkaLDXqNyss3GIzooqCgAHPnzsW8efMwcOBAAECdOnXQrl07HD16FK+99ho++ugjvPrqqygqKsKbb74JwG50AsC5c+fAcRymTJmCuXPnIikpCe+88w4eeeQRHD58GFqt1msbzp07hy1btuB///sf1qxZgzNnzmD48OGwWCyYOHEiAKBVq1ZYuHAhGjRogKtXr2Ly5Mm4//778d9//yEhIcHrMaSJxT744AMMGzZMZJxbLBZ8/vnnqFOnDgDg6aefxg8//IDr168jPj4ejRs3RocOHbB161ZhIt6lSxc89dRTqF27Ns6ePYu33noLXbt2xa5du6BSqVC5cmWo1WrEx8ejcuXKojaNGzcO8+bNQ3FxMe677z6sXr1a9LnFYsG8efPQqlUrAMCiRYvQqFEjwWj0lbi4OHzzzTfCtdi5cycAoFKlSqLtKlWqhGvXrvm835EjR6Jnz54A7GWE161bh2+//RZvvPGGcB5nzpwJAGjQoAGOHDmC6dOnC9+fMWMG7r77btE1aNKkic/HB4DnnntOMD7HjRuH1q1bY8KECUJYxauvvornnnvOr33269cPWVlZaNeuHQghsFqtGDZsmEjW7q0/Xrt2DVqt1kVGLneO27RpI1oA+/vvv9GiRQvh9ZIlS8BxHL799lvo9Xo0adIEly5dwksvvSRso9FoMHnyZOF17dq1sWvXLvz0008+GefVqlVD586dsWDBAsE4X7BgAdq3b4877rhDtK23Y8XHxyMmJgYmk8mlzwPAmDFjRNenb9++2Lx5M9q2bQsAeP7550Wx8R9//DHGjRsnLBpNnz4dW7duxZw5czB//nxcuHAB9erVQ7t27aBQKFCzZk2vvxewhxPs3bsXx48fR/369QHA5bcGMibI8cwzz+CFF14AYF9Q2rhxIz799FNR3+dJSkqCVqtFbGys6PypVCqP5z0/Px95eXl47LHHhPbKGf+hpNQY5+PHj0fXrl1Ro0YNFBQUYMmSJdi2bRvWr4/MCnpZwMIb51ZmnDMYjJLDZHV6ePIMFmacMwRiNCoce8/3uOtQH9tXjh8/DpPJhIcfftjlsyZNmmDRokV47bXXMH78eKhUKrzyyiuoVKmSYExwHAeLxYJPPvkEnTp1AgAsXboUlStXxtatW32KPec4Dqmpqfjqq6+gUqnQsmVLXL58GR999JFgnHft2lXYvlmzZmjVqhVq1qyJn376Cc8//7zXY2zatAlTp07FiRMnkJ+fD6vVCqPRiOLiYiE+NDY2VpjUAnZDqlatWiJvd6VKlXDjxg3hNe1lbtq0KZo1a4Y6depg27ZtsueUZuzYsXj++edx/vx5TJ48GQMGDMDq1asFT5darRYMJcDueU1OTsbx48f9Ms6bNm3q0yKJv9CJjtVqNe6++24cP34cgL1f8YsKctsDds/5M888E1QbmjVrJvyfX2xo2rSp6D2j0Yj8/HwkJib6tM9t27ZhypQp+Oyzz9CqVSucOXMGr776Kt5//31MmDABQPD9kWb58uUiI0qqODl+/LiQbJFHLsn0/Pnz8d133+HChQswGAwwm80+VwcAgBdffBGDBw/GrFmzoFQqsWTJEsyePVt222CO5cs14++x/Px8XLlyRTDcedq2bYt//7XnmRo0aBAeeeQRNGjQAF26dMFjjz0mjEWeSE9PR7Vq1QTDXI5AxgQ5pNerdevWfmXa5/F03suVK4dBgwahc+fOeOSRR9CxY0f06tULaWlpfh/HV0pNQrgbN25gwIABaNCgAR5++GHs27cP69evxyOPPBLppt228LJ25jlnMBglCb0gmGdg0naGE4VCgVitOiJ//sgYeZm6O/r164dr167h8uXLuHXrFiZNmoSbN28KHiZ
+4kcnva1YsSIqVKiACxcu+NSGtLQ01K9fXyQnbdSoEa5duyYkMJOSnJyM+vXr48yZM173n5mZicceewzNmjXDL7/8ggMHDghJt+j905JywH4N5d7jOPdzjTvuuAMVKlTwqV0VKlRA/fr18cgjj2DZsmVYs2aNx7K7gRIXFyd6zXvj6Mzh/Gs5T2e48Nb3fIG+Pny/l3vP0zWTMmHCBPTv3x8vvPACmjZtiieffBJTpkzB1KlT3e5H2h8rV64Ms9nsUqFA7hxXr14ddevWFf50Op3PbeVZtmwZxowZg+effx4bNmxAeno6nnvuObf3jxzdu3eHTqfDqlWr8Mcff8BiscgmKQz2WL5cM3+u1//93/8hIyMD77//PgwGA3r16uVTckVf+l8oxoRQ4ct5X7BgAXbt2oU2bdpg+fLlqF+/fljGFJ5SY5x/++23yMzMhMlkwo0bN7Bp0yZmmIcZi5V5zhkMRslDjzks7pxRGqlXrx5iYmKEEl/uqFSpEuLj47F8+XLo9XphXsN7tE6ePClsm52djaysLJ/lpW3btsWZM2dEE9xTp04hLS3Nrce3sLAQZ8+e9ckrdODAAXAch5kzZ+K+++5D/fr1/S5b5iuXLl3CrVu3/PZW8b+dDoG0Wq3Yv3+/8PrkyZPIzc0NWqpau3ZtVK5cWXTN8/PzsWfPHo9lf6XQk36r1YoDBw4IbePl9+62B+weVG/9LhIUFxeLZOYAhIUje34vV6T9sWXLltBoNKLfd/LkSVy4cMGvcwzYz+Xhw4dFmfSl53LHjh1o06YNhg8fjhYtWqBu3bo4e/asX8dRq9UYOHAgFixYgAULFqBPnz6yBqwvx9JqtX7lDnBHYmIiqlSp4pJnYceOHaIFwcTERPTu3Rtff/01li9fjl9++QXZ2dke992sWTNcunQJp06dCrqd3pBer927d3u8j+XOn6/XuEWLFhg/fjx27tyJO++8E0uWLAnNj5Ch1BjnjJLHyjHjnBE8HEdw7Eq+kMOAwfAGrdZhnnNGaUSv12PcuHF444038P333+Ps2bPYvXs3vv32WwDAvHnzcPDgQZw6dQrz58/HyJEjMXXqVCGWtn79+nj88cfx6quvYufOnfjvv/8wcOBANGzYEB06dBAd68iRI0hPTxf+eFnqSy+9hOzsbLz66qs4deoU/vzzT0yZMkVUK33MmDHYvn07MjMzsXPnTjz55JNQqVTo27ev199Yt25dWCwWfPrppzh37hx++OEHfPHFF0Gfu8LCQowdOxa7d+9GZmYmNm/ejMcffxx169b1KOffs2cP5s2bh/T0dJw/fx5btmxB3759UadOHZHhptFo8PLLL2PPnj04cOAABg0ahPvuu08kaefPZWFhIW7evIn09HQcO3bMY7sVCgVGjRqFDz74AL///juOHDmCAQMGoEqVKn7Vpp4/fz5WrVqFEydOYMSIEcjJycHgwYMBAMOGDcPp06cxduxYnDx5EkuWLHGpsT1+/Hjs27cPw4cPx+HDh3HixAl8/vnnyMrK8rkNgeDtnHXv3h2ff/45li1bhoyMDGzcuBETJkxA9+7dBSPdW39MSkrC888/j9deew1bt27FgQMH8Nxzz6F169Zek8Ht3bsXDRs2xOXLlwHY1SsKhQIvvvgijh07hjVr1uDjjz8WfadevXrYv38/1q9fj1OnTmHChAnYt2+f3+fmhRdewJYtW7Bu3TrhWq5atUpUw92XY9WqVQuHDx/GyZMnkZWVBYsl8Ofj2LFjMX36dCxfvhwnT57Em2++ifT0dCEp3qxZs7B06VKcOHECp06dws8//4zKlSt7LRvXvn17PPDAA+jZsyc2btyIjIwMrF27FuvWrQu4re74+eef8d133+HUqVOYOHEi9u7di5EjRwqfP/zww5g3b57wulatWtizZw8yMzORlZUFjuO8nveMjAyMHz8eu3btwvnz57FhwwacPn06rHHnpSbmnFHyWPhs7cyoYgTBioOX8MaKwxjbuQFGdKgb6eYwSgFizzmrdc4onUyYMAFqtRrvvvsurly5grS0NAwbNgyA3VCYOHEiCgsL0bBhQ3z55ZcuWau///57jB49Gt26dYNSqUT79u2xbt06F/nnAw88IHqtUqlgtVpRvXp1rF+/HqNHj0azZs1QtWpVvPrqqxg3bpyw7aVLl9C3b1/cunULFStWRLt27bB7925UrFjR6+9r3rw5Zs2ahenTp2P8+PF44IEHMHXqVAwYMCDQUya0//Dhw1i0aBFyc3NRpUoVdOrUCe+//75HaXJsbCxWrlyJiRMnoqioCGlpaejSpQveeecd0fdiY2OFDOSXL1/G/fffLyya8NCJww4cOIAlS5agZs2ayMzM9Nj2N954A0VFRRgyZAhyc3PRrl07rFu3ThTX7I1p06Zh2rRpSE9PR926dfH777+jQoUKAIAaNWrgl19+wejRo/Hpp5/i3nvvxZQpUwSDD7Av7GzYsAFvvfUW7r33XsTExKBVq1Y+LbgEg7dz9s4770ChUOCdd97B5cuXUbFiRXTv3h0ffvih8D1f+uPs2bOhVCrRs2dPmEwmdO7cWTYBmJTi4mKcPHlSMGjj4+Pxxx9/YNiwYWjRogUaN26M6dOnC8n4AGDo0KE4dOgQevfuDYVCgb59+2L48OF+ZakH7IZ3mzZtkJ2dLeQMyMvLEyljfDnWiy++iG3btuHuu+9GYWEhtm7dilq1avnVFp5XXnkFeXl5eP3113Hjxg00btwYv//+O+rVqwcASEhIwIwZM3D69GmoVCrcc889WLNmjYv6QY5ffvkFY8aMQd++fVFUVIS6deti2rRpAbXTE5MnT8ayZcswfPhwpKWlYenSpSLP/9mzZ0WLUmPGjMHAgQPRuHFjGAwGZGRkeD3vsbGxOHHiBBYtWiSod0aMGIGhQ4eG/PfwKIg7LcltSH5+PpKSkpCXl+dzAouyzOyNpzB382kAwLkpj0KpDF/ZAMbty0frT2D+1rN44q4qmNOnhfcvMMo8gxfuw5YT9kQw73RrhBfuv8PLNxi3K0ajERkZGahdu7ZfBg6DUdrIzMxE7dq1cejQIb8SjjGiH0II6tWrh+HDh+O1116LdHNuCxQKBVatWuWXKqUk8PTM8tUOZZ5zhlusVJya2cZBr2QZkxn+w3tBc5k8meEjLCEcg8FgMG4Hbt68iWXLluHatWt+l59jlE1YzDnDLbysHWAZ2xmBY+KNc5bYi+EjzDhnMCLP4sWLER8fL/vnb91sBjBlyhS355MuIca4vUhNTcV7772Hr776CikpKZFuTtCwcSH8MM85wy30BJklhWMECt93mJHF8BWTjWVrZzAiTY8ePVxqavNI494Z3hk2bBh69eol+1lMTAyqVq3qNms5o/Ryu13TaBkXbrfzSsOMc4Zb6ERwLCkcI1BMzDhn+AnznDMYkSchIQEJCQmRbsZtQ7ly5VCuXLlIN4PBCAo2LoQfJmtnuMVKy9qZ55wRICarvaZkbrEZHHf7rnQyQgffZwCWq4DBYDAYDEbZgRnnDLfQ3nJmnDMChe87HAEKzdYIt4ZRGq
DHm/woN86NFhsbHxkMBoPBYIQEZpwz3EIngWMJ4RiBYqIlyix+mOEDpaXOuY0j6DLnLzz6yd9MFcJgMBgMBiNomHHOcAuTtTNCgcnKknsx/INeDMwzWKLW8M03WJB5qxhnbhSy2HgGg8FgMBhBw4xzhlvECeGic3LMiH5MLLkXw0/oxcBoDoegFxFyotjDz2AwGAwGo3TAjHOGW8ws5pwRAkwWOrkXM2AY3pGON9EaDkG3MydK28hglGW++uorVK9eHUqlEnPmzPHpOw8++CBGjRoV1nZFGwsXLkRycnLI97tt2zYoFArk5ub6/B3p+a9Vq5bP147hOwqFAr/++mukm8GQgRnnDLeIEsLZbB62ZDDcY2Y1qxl+YOMIrA4Zu1Zlf0RFq+LCYisdsfGM6KNHjx6oUaMG9Ho90tLS0L9/f1y5ckW0zfr163HfffchISEBFStWRM+ePZGZmSm7vx07dkCtVuOuu+4SvT9o0CA88cQTHtuya9cuPPTQQ4iLi0NiYiIeeOABGAwG4fNatWpBoVCI/qZNmxbIzw4p0jYpFAosW7ZM+Dw/Px8jR47EuHHjcPnyZQwZMgT//PMP2rZti/LlyyMmJgYNGzbE7Nmz/T72ypUr0alTJ5QvXx4KhQLp6ek+f/fChQvo1q0bYmNjkZqairFjx8Jqjaw6qHfv3jh16lRE28CzcuVKvP/++yVyrLK4EMNz9epVdO3aNah9sIWT8MCMc4ZbxDHnTNbOCAyThcnaGb5De6MrJugARG+/ocN9mOec4Q8dOnTATz/9hJMnT+KXX37B2bNn8fTTTwufZ2Rk4PHHH8dDDz2E9PR0rF+/HllZWXjqqadc9pWbm4sBAwbg4Ycf9rsdu3btQpcuXdCpUyfs3bsX+/btw8iRI6FUiqeH7733Hq5evSr8vfzyy/7/6DCwYMECUbvohYgLFy7AYrGgW7duSEtLQ2xsLOLi4jBy5Ej89ddfOH78ON555x288847+Oqrr/w6blFREdq1a4fp06f7/B2LxQKbzYZu3brBbDZj586dWLRoERYuXIh3333Xr+OHmpiYGKSmpka0DTzlypVjdbRLgMqVK0On00W6GQwZmHHOcIuFZWtnhAAz8y4y/EDOOI9WxQXznEcAQgBzUWT+iH+L1BzHYcaMGahbty50Oh1q1KiBDz/8EAAwevRo3HfffahZsybatGmDN998E7t374bFYu/rBw4cgM1mwwcffIA6derg//7v/zBmzBikp6cL2/AMGzYM/fr1Q+vWrf0+naNHj8Yrr7yCN998E02aNEGDBg3Qq1cvl0l7QkICKleuLPzFxcX5tP9bt26hb9++qFq1KmJjY9G0aVMsXbpUtM2DDz6Il19+GaNGjUJKSgoqVaqEr7/+GkVFRXjuueeQkJCAunXrYu3atS77T05OFrVLr9cDsMu0mzZtCgC44447oFAokJmZiRYtWqBv375o0qQJatWqhWeffRadO3fG33//Ldqv1WrFyJEjkZSUhAoVKmDChAkg1PXv378/3n33XXTs2NHtb1coFPj888/Ro0cPxMXF4cMPP8SGDRtw7Ngx/Pjjj7jrrrvQtWtXvP/++5g/fz7MZu9jyNmzZ/H444+jUqVKiI+Pxz333INNmzaJtqlVqxY++OADDBgwAPHx8ahZsyZ+//133Lx5E48//jji4+PRrFkz7N+/X/iOVNY+adIk3HXXXfjyyy9RvXp1xMbGolevXsjLy/PYvjVr1qB+/fqIiYlBhw4dXJQevvYHd97swYMH47HHHhO9Z7FYkJqaim+//davYw0aNAjbt2/H3LlzBeVFZmamIMVfv349WrRogZiYGDz00EO4ceMG1q5di0aNGiExMRH9+vVDcXGxsD+TyYRXXnkFqamp0Ov1aNeuHfbt2yd8npOTg//973+oWLEiYmJiUK9ePSxYsMDj+eS5dOkS+vbti3LlyiEuLg5333039uzZA8B5rb777jvUqFED8fHxGD58OGw2G2bMmIHKlSsjNTVVGHt4aFl7ZmamoDxp06YN9Ho97rzzTmzfvt1tmx588EGcP38eo0ePFs6fL+cdAFasWIGmTZsiJiYG5cuXR8eOHVFUVOTTuSgLMOOc4RYzy9bOCAF0zHm0ekAZ0YPJEUKjUAAV4rUAorffmKwsIVyJYykGplSJzJ+l2Hv7KMaPH49p06ZhwoQJOHbsGJYsWYJKlSq5bJednY3FixejTZs20Gg0AICWLVtCqVRiwYIFsNlsyMvLww8//ICOHTsK2wB2z/G5c+cwceJEv0/ljRs3sGfPHqSmpqJNmzaoVKkS2rdvj3/++cdl22nTpqF8+fJo0aIFPvroI59l2EajES1btsSff/6J//77D0OGDEH//v2xd+9e0XaLFi1ChQoVsHfvXrz88st46aWX8Mwzz6BNmzY4ePAgOnXqhP79+4uMIQAYMWIEKlSogHvvvRffffedYED37t1bMFr37t2Lq1evonr16i7tO3ToEHbu3In27du7tEetVmPv3r2YO3cuZs2ahW+++can30wzadIkPPnkkzhy5AgGDx6MXbt2oWnTpqJ+0LlzZ+Tn5+Po0aNe91dYWIhHH30UmzdvxqFDh9ClSxd0794dFy5cEG03e/ZstG3bFocOHUK3bt3Qv39/DBgwAM8++ywOHjyIOnXqYMCAAaIFBylnzpzBTz/9hD/++APr1q3DoUOHMHz4cLfbX7x4EU899RS6d++O9PR0vPDCC3jzzTdF2/jaH9zxwgsvYN26dbh69arw3urVq1FcXIzevXv7day5c+eidevWePHFFwXlBd1HJk2ahHnz5mHnzp24ePEievXqhTlz5mDJkiX4888/sWHDBnz66afC9m+88QZ++eUXLFq0CAcPHkTdunXRuXNnZGdnA4AwDqxduxbHjx/H559/jgoVKnj9zYWFhWjfvj0uX76M33//Hf/++y/eeOMNcJzz+XP27FmsXbsW69atw9KlS/Htt9+iW7duuHTpErZv347p06fjnXfeEQx6d4wdOxavv/46Dh06hNatW6N79+64deuW7LYrV65EtWrVRKoaX8771atX0bdvXwwePBjHjx/Htm3b8NRTT3nsi2UOUobIy8sjAEheXl6km1Iq6DhzG6k5bjWpOW41WbLnfKSbwyil1H97jdCPXly0L9LNYUQ5F7OLSM1xq0n9t9eQ15ank5rjVpP5W09Hulmy7DqbJfTt8SsPR7o5tyUGg4EcO3aMGAwG+xumQkImJkbmz1Toc7vz8/OJTqcjX3/9tdtt3njjDRIbG0sAkPvuu49kZWWJPt+2bRtJTU0lKpWKACCtW7cmOTk5wuenTp0iqamp5OTJk4QQQiZOnEiaN28u2sfAgQPJ448/Lnv8Xbt2EQCkXLly5LvvviMHDx4ko0aNIlqtlpw6dUrYbubMmWTr1q3k33//JZ9//jlJTk4mo0eP9vlcSOnWrRt5/fXXhdft27cn7dq1E15brVYSFxdH+vfvL7x39epVAoDs2rVLeO+9994j//zzDzl48CCZNm0a0el0ZO7cucLnhw4dIgBIR
kaGSxuqVq1KtFotUSqV5L333hN91r59e9KoUSPCcZzw3rhx40ijRo1c9pORkUEAkEOHDrl8BoCMGjVK9N6LL75IOnXqJHqvqKiIACBr1qxx2YcvNGnShHz66afC65o1a5Jnn31WeM2fuwkTJgjv8df+6tWrhBBCFixYQJKSkoTPJ06cSFQqFbl06ZLw3tq1a4lSqRS+I2X8+PGkcePGovfGjRtHAIj6rRS5/vDqq6+Kfs/s2bOF140bNybTp08XXnfv3p0MGjTI7f79ORYhhGzdupUAIJs2bRLemzp1KgFAzp49K7w3dOhQ0rlzZ0IIIYWFhUSj0ZDFixcLn5vNZlKlShUyY8YMoZ3PPfecT+2k+fLLL0lCQgK5deuW7OcTJ04ksbGxJD8/X3ivc+fOpFatWsRmswnvNWjQgEydOlV4DYCsWrWKEOLsx9OmTRM+t1gspFq1aqJzLUV6bdxBn/cDBw4QACQzM9Pr90ojLs8sCl/tUHVJLwYwSg8Wlq2dESSEELGsPUo9oIzogfdGa9VKJMfaPYTR6jlnsvYIoIkF3rrifbtwHdtHjh8/DpPJ5DEOfOzYsXj++edx/vx5TJ48GQMGDMDq1auhUChw7do1vPjiixg4cCD69u2LgoICvPvuu3j66aexceNGcByHfv36YfLkyahfv35AP4f3vA0dOhTPPfccAKBFixbYvHkzvvvuO0ydOhUA8NprrwnfadasGbRaLYYOHYqpU6d6jVm12WyYMmUKfvrpJ1y+fBlmsxkmkwmxseJz2axZM+H/KpUK5cuXF2TpAARP840bN4T3JkyYIPy/RYsWKCoqwkcffYRXXnnF62//+++/UVhYiN27d+PNN99E3bp10bdvX+Hz++67T5DpAkDr1q0xc+ZM2Gw2qFQqr/vnufvuu33e1hcKCwsxadIk/Pnnn7h69SqsVisMBoOL55w+n/y5c3c+K1euLHusGjVqoGrVqsLr1q1bg+M4nDx5UvY7x48fR6tWrUTvSUMtfO0PnnjhhRfw1Vdf4Y033sD169exdu1abNmyxWW7YI8lPYexsbG44447UGC0QKFQoFKlSoI3+OzZs7BYLGjbtq3wHY1Gg3vvvRfHjx8HALz00kvo2bOnoAR54okn0KZNG6/tSE9PR4sWLVCuXDm329SqVUsUp1+pUiWoVCpR7ohKlSqJ7h856OulVqtx9913C+33FW/nvXnz5nj44YfRtGlTdO7cGZ06dcLTTz+NlJQUv45zO8OMc4Zb6GRHFhZzzggAi42IwjSjtSQWI3rgFwJ1aiWSYhzGeZT2G3pczCmKzjbedigUgNa3eOdIEhMT43WbChUqoEKFCqhfvz4aNWqE6tWrY/fu3WjdujXmz5+PpKQkzJgxQ9j+xx9/RPXq1bFnzx40bNgQ+/fvx6FDhzBy5EgAdmObEAK1Wo0NGzbgoYce8nj8tLQ0AEDjxo1F7zdq1MjF2KNp1aoVrFYrMjMz0aBBA4/H+OijjzB37lzMmTMHTZs2RVxcHEaNGuUSX01L9QF7PCz9Hm8o01JeuXa9//77MJlMXhcNateuDcBusF6/fh2TJk0SGeehQhqbX7lyZRcJ9/Xr14XPvDFmzBhs3LgRH3/8MerWrYuYmBg8/fTTHs8nf+78PZ/hwNf+4IkBAwbgzTffxK5du7Bz507Url0b999/f8iPJT1fGo0GNo5D5q1iISbYn/PXtWtXnD9/HmvWrMHGjRvx8MMPY8SIEfj44489fs+XscTb/cO/VxLX29t5V6lU2LhxI3bu3CmEBrz99tvYs2ePcF+WdVjMOcMttMfTxDznjACQJhJkdc4Z3nAa56qo95zTVSxYzDmDpl69eoiJicHmzZt92p6fNJtMJgBAcXGxS8Z03mPLcRwSExNx5MgRpKenC3/Dhg1DgwYNkJ6e7uLBlKNWrVqoUqUKTp48KXr/1KlTqFmzptvvpaenQ6lU+pTde8eOHXj88cfx7LPPonnz5rjjjjvCVrIrPT0dKSkpfmeg5jhOOO880tjc3bt3o169en55zeVo3bo1jhw5IvJgbty4EYmJiS6LJHLs2LEDgwYNwpNPPommTZuicuXKbsvrBcuFCxdE5f12794NpVLpdkGmUaNGLgsPu3fvFr0ORX8oX748nnjiCSxYsAALFy4UVB9SfDmWVquFzY9SwXaHA4FNEh9dp04daLVa7Nixw7mtxYJ9+/aJrmvFihUxcOBA/Pjjj5gzZ45PVQKaNWuG9PR0IXY9nNDXy2q14sCBA2jUqJHb7eXOny/nXaFQoG3btpg8eTIOHToErVaLVatWhfbHlGKY55zhFiuTtTOChE4GB0Rv1m1G9MAv6Ggpz3m09htxJYLobCMjMuj1eowbNw5vvPEGtFot2rZti5s3b+Lo0aO48847sW/fPrRr1w4pKSk4e/YsJkyYgDp16giy0m7dumH27Nl47733BFn7W2+9hZo1a6JFixZQKpW48847Rcfks0RL38/Ly3Opw12+fHlUr14dY8eOxcSJE9G8eXPcddddWLRoEU6cOIEVK1YAsJda27NnDzp06ICEhATs2rULo0ePxrPPPuuTDLVevXpYsWIFdu7ciZSUFMyaNQvXr1/3yRD1xB9//IHr16/jvvvug16vx8aNGzFlyhSMGTPG4/fmz5+PGjVqoGHDhgCAv/76Cx9//LGLFP7ChQt47bXXMHToUBw8eBCffvopZs6cKXyenZ0tMl75BQ4+a7w7OnXqhMaNG6N///6YMWMGrl27hnfeeQcjRozwaVGhXr16WLlyJbp37w6FQoEJEyaEzRuq1+sxcOBAfPzxx8jPz8crr7yCXr16Cb9v1apVGD9+PE6cOAHAXjVg5syZGDt2LF544QUcOHAACxcudGl/KPrDCy+8gMceeww2mw0DBw4EAMybNw+rVq0SFsR8OVatWrWwZ88eZGZmIj4+3qN0HBDPi2nzPC4uDi+99BLGjh2LcuXKoUaNGpgxYwaKi4vx/PPPAwDeffddtGzZEk2aNIHJZMLq1as9Gr48ffv2xZQpU/DEE09g6tSpSEtLw6FDh1ClSpWAKjR4Yv78+ahXrx4aNWqE2bNnIycnB4MHDxY+b9iwIaZOnYonn3wSgP38/fXXX+jTpw90Oh0qVKjg9bzv2bMHmzdvRqdOnZCamoo9e/bg5s2bPp2LsgIzzhluCaWsPd9owW+HLqPLnWlCeSTG7Y9UcWGycjBabNBrgvM+MG5f+IVArYqStUep59zCsrUzPDBhwgSo1Wq8++67uHLlCtLS0jBs2DDExsZi5cqVmDhxIoqKipCWloYuXbrgnXfeEQy0hx56CEuWLMGMGTMwY8YMxMbGonXr1li3bp1PMleabdu2oUWLFqL3nn/+eXzzzTcYNWoUjEYjRo8ejezsbDRv3hwbN25EnTp1AAA6nQ7Lli3DpEmTYDKZULt2bYwePVoUh+6Jd955B+fOnUPnzp0RGxuLIUOG4IknnvBakssbGo0G8+fPx+jRo0EIQd26dTFr1iy8+OKLHr/HcRzGjx+PjIwMqNVq1KlTB9OnT8fQoUNF
2w0YMAAGgwH33nsvVCoVXn31VQwZMkT4/Pfffxd5bPv06QMAmDhxIiZNmuT2+CqVCqtXr8ZLL72E1q1bIy4uDgMHDsR7773n0++eNWsWBg8ejDZt2qBChQoYN24c8vPzffquv9StWxdPPfUUHn30UWRnZ+Oxxx7DZ599Jnyel5cnUl3UqFEDv/zyC0aPHo1PP/0U9957L6ZMmSIy7kLVHzp27Ii0tDQ0adIEVapUAQBkZWXh7Nmzfh1rzJgxGDhwIBo3bgyDwYCMjAyPx7VyznmxNLn4tGnTwHEc+vfvj4KCAtx9991Yv369sIil1Woxfvx4ZGZmIiYmBvfffz+WLVvm9bdqtVps2LABr7/+Oh599FFYrVY0btwY8+fP9/pdf5k2bRqmTZuG9PR01K1bF7///rsoo/zJkydF5++9997D0KFDUadOHZhMJhBCvJ73xMRE/PXXX5gzZw7y8/NRs2ZNzJw5E127dg357ymtKAgpO7nr8/PzkZSUhLy8PCQmJka6OVFP/XfWChPlF9rVxjuPBb7S/e0/GXh/9TG8eH9tvN0tuBVzRukhM6sID368DfE6NQwWG2wcwZ63HkalRH2km8aIUracuI7BC/ejadUkvPd4Ezz52U5UTY7Bjjc9x89GgmV7L+DNlUeE18ff64IYLVt4CiVGoxEZGRmoXbu2UMOawWCEl0mTJuHXX391UVxEC4WFhahatSoWLFiAp556qsSOm1VgwpU8AwCgYeVEaNW3R3RwZmYmateujUOHDuGuu+6KdHNKNZ6eWb7aobdHr2KEBVq+E6znnM9knFXIvEtlCZNMci8m/2V4wizK1h7ddc6l4yLznjMYDEb44DgON27cwPvvv4/k5GT06NGjRI9vpUIIypBvk1HCMOOcIYuNI6DUOy6JvfyFl8gbzL4n3mCUfujM28mCcc4MGIZ75BZ0Ck3WqKwYYbaJJ2fMOGeUNbp27Yr4+HjZvylTpkS6eaWOJk2auD2fixcvjnTzIs6FCxdQqVIlLFmyBN999x3U6pKNzrVSY36oTPMpU6a4veZM6l02YTHnDFmkE+Fgs7XzXvhiCzPOyxImq/166zQqJDkyb7Na5wxP0J7zRL3zEZVvsKB8fHTlq5AmymSqEEZZ45tvvoHBYJD9zFtyLYYra9asgcUiP47wtclLgkmTJnmMnY8UtWrViqjHmo4550LUjmHDhqFXr16yn/mbXyJQIn1eGWKYcc6QReopt9iCu2n5Ac1gtga1H0bpwkQl90qO8uRejOhAyNauUkKtUiJBp0aByYrcKDTOmaydUdapWrVqpJtwW+GphB0j8nhKCBco5cqVYwtZDBFM1s6QxSoxxs3W4Dze/CTWwDznZQpB1q6hMm8z7yLDAyaL03MOQFBcROOijqtxHn1tvF1gXh0GgxFpRKXU2JjEkCEU/YIZ5wxZpJPOYOuc88Z+MYs5L1PwsnatypncK9fAvIsM99B1zgFE9aKOVGGUW8T6dqhRqezZ781mdm4ZDEbkIISIPecRbAsjeikuLgZgL/cYKEzWzpBFaowHK2u3ODJcsoRwZQuTjOecxeUyPEEnEQSA5Gj2nFulCeGir42lHbVajdjYWNy8eRMajQZKJfMpMBiMksfGcbBZTMJro1EFDdiclmGHEILi4mLcuHEDycnJwsJyIDDjnCELvToIhM5zzmTtZQtn5m2VYGSxhHAMT5ipPgOAWtSJPs+p2WYfz/QaJYwWjsWchwGFQoG0tDRkZGTg/PnzkW4Og8Eoo1htHG7kO41zW54WMdrADTDG7UlycjIqV64c1D6Ycc6QxUXWHmQZI742JJO1ly1ECeEcxnk+M84ZHnCVtfO1zqMvmSTvOU9N0ONCdjEzzsOEVqtFvXr1mLSdwWBEjP8u52LSb+nC63FdGqJTg+CMMMbthUajCcpjzsOMc4YsUk95sJ5zXhZvtnKwcQQqpSKo/TFKB3IJ4ZisPXIYzDacvlGAplWToFBE5z1ophZ0AMpzHoW5CvhFzEqJOodxzvp2uFAqldDr9ZFuBoPBKKNkGQguFzgdTMU2NiYxwgML3mLIEnLPOfV9Jm0vO9AJ4XgPaDQaWWWFd3/7Dz3m7cCOM7ci3RS3CH2mFMSc8+NiaoJ9ghaN0nsGg8FgBE9WoXh8D3ZezGC4o9QY51OnTsU999yDhIQEpKam4oknnsDJkycj3azbFmnMudRYD2Z/xazWeZmBL4ul0zhl7cxzHjnOZ9uziP57KTeyDfGAEApRCrK18+NixQR7/fUclq2dwWAwbkuyCk2i18EqShkMd5Qa43z79u0YMWIEdu/ejY0bN8JisaBTp04oKiqKdNNuSywhl7VTnnMWd15m4FeWdWoVkh1GVoHRChvHipBEAqNDtZKZFb3jpku29pgo9pw72pqaaDfO841WkUqIwWAwGLcHUuPcxIxzRpgoNTHn69atE71euHAhUlNTceDAATzwwAMRatXtC29UqZUKWDkSsmztAJO1lyV4z7lW7Yw5B+xJ4VLitJFqVpmFXxg7f6s4wi1xj9mN5zwas/zzuTQqxuuE9/IMFpSnXjMYDAaj9JNVYFdGKRUAR5jnnBE+So3nXEpeXh4AoFy5cm63MZlMyM/PF/0xfIM3puN09vWb4LO107J2ZpyXFfhSUzq1EmqVEgmO/hSNhlZZgF8YO58dxZ5zmyQhXCmIOY/VqpGgt/dtlhSOwWAwbj9uFdk955US7TlGWMw5I1yUSuOc4ziMGjUKbdu2xZ133ul2u6lTpyIpKUn4q169egm2snTDy9DjeeM8WM85x2TtZRHacw4AiVFcs7oswMvar+ebojb3g7TP0DHnhERXOISFKvuWEutIeMj6NoPBYNx28Anh0pIcxjnznDPCRKk0zkeMGIH//vsPy5Yt87jd+PHjkZeXJ/xdvHixhFpY+nF6hOz1+oJOCGdjnvOyiMnqjDkHnJm3mec8MtALYxeyo1Pa7sxTwGdr1wrvGy3RNRnix0WNSoEUR98uC57zy7kGHI7ipIIMBoMRarIK7J7zKskxAJhxzggfpc44HzlyJFavXo2tW7eiWrVqHrfV6XRITEwU/TF8g4+ljHV4zjmCoBIdWVgptTKJS3Kv2OjNvH27QwgR3XvRGndulizoxGlVUCntNdmjrQwfXZOdX0TIKQOe80Hf7cUT83fger4x0k1hBMi5m4U4c6Mw0s1gMEoFRosNBSa72qwqM84ZYabUGOeEEIwcORKrVq3Cli1bULt27Ug36baGN8TjHJ5zILj4Gjrm3BClclpG6HGpWR3DpL+RwmzjQCfJP38rOuPOpQnhFApF1GZs5xcxNWql4Dm/3fu2jSM4e7MQHAGu5Boi3RxGAFhsHJ76fCeenL9DGKMZDIZ7bjnKZGpVSpSPd6q5GIxwUGqytY8YMQJLlizBb7/9hoSEBFy7dg0AkJSUhJiYmAi37vaD93TzCeEAwGIlQIAJtpmsvWwilSg7k3uxBZqSxmgWTyQyo9VzbhMb54A97vxWkRm5Uaa44BcSNCLPeXS1MdRkF5mFRZ4iExvLSyM5xc57Kc9gQWq
Cyss3GIyyDS9pLx+vFVRdzHPOCBelxnP++eefIy8vDw8++CDS0tKEv+XLl0e6abclZocxrdeooLArSmGyBT4RY7L2sgmf3EvnUhbr9vYuRiPS+y5aPecmRzv5bO1A9GZst1CZ5ctKQribBc5av4Wm6LoeDN/IKXJeN7bAwmB4h69xXj5eKywcszrnjHBRajzn0Zal93bHSk06NSolzFYuqFVCsaydTQbKCi4J4WJYzHmkcDXOS5fnHIi+fuPM1q5ASpwjIVxRdLUx1NwsdBrnBUamgCmN0HkRikzsGjIY3rjlyNReIV4nLBwzWTsjXJQazzmjZKEnnTrHQGSxBb5AQnvOmay97OAuIRzL1l7y8ItiGpVdCnMl1xCV8aYmSZ8BELUx5/Ky9rLkOWeGXWkkp8jZR9nzmMHwDr8oWSFeJywcm6Pw+cm4PWDGOUMWXtauViqpgSgIzzll2DNZe9lBmhAuiSWEixj8fVc5SY84rQocAS7lRF9CL2lCOCB6wyGEhHAqOiFcdC0ghBraOGde19JJNvOcMxh+kSVrnDPPOSM8MOOcIYuzfq9d1k6/FwhWjoo5Zyv1ZQZpWazkKI0dLgsYHcZ5rEaNGuXjAERf3DkhRF7W7vBKR1O/oduqoWLOy5LnvIAZdqUSegGpiFVPYTC84pS1O2POmaydES6Ycc6QhY8516gVQSe/IISIJPHFbDJQZjBJvKDMOI8c/KKYXqtCrfKxAKIv7tzKEfDpRXQqZwZpwXMeRV5pOo+GVq10hmwUW27rHCl0zHkhizkvlWTTsnaWEI7B8ArtOedDPZnnnBEumHHOkEWQayqVQoxqoAORjRNPVA0WNqCVFaTxw7SRdTsbMNEIL2uP0ShRU/CcR5dxTi8AaqM85pxWEtHZ2s027raO471ZYBT+z2LOSyd0zDm7hgyGd5isnVGSMOOcIQst19Q6JMmBytqtUuOcec7LDIKsXePwnDtizq0cQdFtbMBEI07j3Ok5z4wyWbvZjXGeFIXGOd1WjUqBWK1KyOJ7O0vbWcx56Yfun0zJxmB4J8sha6dLqTHjnBEumHHOkEVO1h7oQCQ16m9nrxLDCcdR8cMOo0WvcSYYZEnhShY+5jxGq0KNKJW182OMWqmASqkQ3o/GcAi+bysUgEqpgEKhEEnbb1duFLBSaqWdbFHMOXseMxiesNo4YUFL5DlnMeeMMMGMc4YsvKxdq1JCy8vaA/Wc26SydjYZKAvQ/UWnsasvFApFVEqUywJCzLlGhVoOWfulnGJhIS4akMvUDkRnzDmdqV2hsI+Rt3tSOKPFJjLImSS6dJLLsrUzGD6TXWwGIYBSAZSL0wrOhkDzMDEY3mDGOUMW3rBSK52e80Bl7RZO/D2Wrb1sIIofVlHxw7wXNIoMrbIALWuvnKiHVq2ExUZwNc/o5Zslh9kmLr3Hk+ToM/lGCzguOnIVWPiQDZm+nXOb9m1a0g4w47y0QieEK2IJ4RgMj2QV2O+XcnFaqJTBq0kZDG8w45whCz/x1KiVga8SmgqBXZ+B5FwQvc1k7aUXf5K48TXOFQoISQUBumb17WnARCu0ca5UKlCzXPTFnRst4jAIHr7PEBI9Umqh3CS1kMB7zm/XkA06UzvAvK6lEYuNE91DLOacwfDMrSL7uFc+TgcAIlk7S2zLCAfMOGfIwidxC6rO+aZJwPrxqLCkk+htJmsvnaw6dAnNJm3AzjNZPm1vpjK187JfAEiK4Q0YZpyXJEazM+YcAGoKSeFCGHd+agPweTvg2pGAvs4rdvgEgjw6tQoxjtCIkgqHOH+rCM8v3If9mdmyn/OLlfTCU0qcw3NeFL42chyJmMea95ynJtgnqdGyUMLwHem4y9QPDIZnhEztCfa5C1/mkxDXhMcMRihgxjlDFsErpApCwpOxHQCgMuaI3jZbOZfyaozo5+/TWSgwWbHmv6s+bS/UOJd4QYWkWYbb07sYrfCLYnoNb5zb484vhNJzvuQZ4PoRYEmfgL5udtNnAFpxUTL95vf0K9h84gaW77so+7mFqmjBk1wCMeevLk/H3R9sxLUIhCPwxnmtCva+Y7JyAYc7MSKDtG8yJVsJYDUBRbci3QpGgPCy9grxYs85wKTtjPDAjHOGLGarc+LJT5T9HoSUGtHLOIfHDmBSutIIn+37+NUCn7Z3llFTid4XEsIxz3mJYnBIxnkPdK1weM55Cq8H9DVnQjiVy2clnbGd95a4U/oISTNFsnY+cV34jPOD53NgtHA4ed23+zCU8MZ5bcfCDsCk7aUNOt4cYNevRPjhKWB2E6BYXoXDiG6y3MjaAWacM8IDM84ZstCy9oATwqkkxrlODV7dzKTtpQ8+kd+Jq/k+JeXy5jln2dpLFoNE1l7DYWCdD0fMORfYtXWXrR0AEks4Yztfbspdrg2LzbV/Oz3n4WtjkWNhszgCRhUfc56WrIfeEXrApO2lC37hSO0oVVjEFsrDz83jgNUA5F7wvi0j6hA85w5Zu4oq9cnKqTHCATPOGbLQE8+AZe0qreilRqVErMNrxzK2lz74BZUisw0Xc7x7W02O7aXxw0mxLOY8EhiphHCA03N+/lZx1GRAN8lkQOdJETKhl4ysPdvhLTG6WUg0y8jay4VZ1k4IETydkYgV5j3nFRN0iNdpItYORuBkO/IhVEmOAQAUs2zt4cfmeNYRZsi5I6fIjLdXHcG/F3Mj3RQXhJhzh6wdQOCKUgbDB5hxzpCFH3DUKoUw+TTb/JzASzznapVC8NqxOLfSBy+LBoDjV/O9bi8k95JIlEs6dphhRxpzXjU5BmqlAiYrhxuSElmRwl0pNcBuEAJAVgm19VahvX+685ybPSWEC5Nxbo/xto/DkZAjC8Z5vA7xOns/YsZ56YLvm1UdxjnznJcAVseYxYxzt6z97xoW77mAr/4+F+mmuMAb5xVp41zNap0zwgczzhmy0MmOAvacK9Wil2ql0zhnsvbSh5FaUDnmQ9y5ySIvUU4uYXkyw45U1q5WKVEtxT5Bj5ZyanSGfykV4/UAUGILCXxsrjdZu1xCuNwwZWunDfKiCCxwijznevv4zozz0kWOo1/z977RwsFahqW520/dxJkbYczfQAhgcyzWMePcLXyYm8FsAy7tB2Y1AY6uinCr7PALteXjnWpQVuucEU6Ycc6QRRRzLnjO/ZwMSjzndlm7fULHZO2lD3pBxT/POYs5jwaksnYgzHHnAeAp5pz3nN8sAeOcECJ4GE1uE8K5tpWvc15gsoYlizltCJe055wQIpG1O4xzFnNeqsgu5o3zWOG94jK6WH4ppxgDv9uLYT8eDN9BOCsAh+qQK5vn2Rf48cxi44AlvYH8S8DPgyLbKNjHPb7OuaysvQwvbDHCBzPOGbJYKMmmkBDO6q+sXRxzrlYpoGey9lKLv8a5yeqIOXfxnNv7BTPOSxaDjHEe1oztAWDyYJzztbVLwnNuN67t4507zwg/HtIJ4ZJiNELSy3AoQ2jjvKTH0HyDVZiIVohnMeelFb5fVkrUCUnhymrcOT+WXA9nWUIbFeJCyuZ59gU+vMJs5QCT9/
lFSZFnsAjPAtpzrmOec0YYYcY5QxY+vlzsOQ88W7saVqiVVEK4MrpSX5qhZe2XcgzIN3o2PnhZu9Q4T3J4zovNNsGAZ4QfwTjXOq+Hs9Z5qIxzhfdNPGCWyYDOU5Ke8+xC54Tan4RwKqUCifrwlVOjvdQlbRTfLLQbMIl6NfQalTPmnHnOSxV8uEa5OC1itWU7bwD/TCu22EBImJJiioxz1zlUnsGCrnP/xiebT4fn+KUEkefcFj35aLIcz4IEvVqUP4fJ2hnhhBnnDFmsnHPiySc8CqbOuR5maFQKYTJgYEloSh28cccb2ye8xJ27SwiXoFPD4bBh3vMShA8l0ct6zkMka1frvG/jAXd5CgAgNdGREK7QFPbs8reoWtBeE8JJ2urMKh/6vk0n7you4TGU9zKmJtpj/1nMeemED9dIidMKoQkl3ZeiBf6ZZuNI+OTJVsrQlJG1p1/MxfGr+fhp/8XwHL+UUORQb1j8TTwcZuSSwQGUce5vuCeD4QPMOGfIYqFqVGsdxpXfDy+Fs3vFwAS1UikvaycE4NjqYzRjsXFCHoKmVZMAeJe2uzO0lEqFULM6jyWFKxE4jghGJi1rr0mVUwuF58im1HrfyAPuFnQAoHycfXJk5UjYy6ll+2CcOxPCidUCyWEsp1ZIyY8LS1iKTGdqBxB6WTshwLbpwH8rQ7M/hix8QriUWC1iHcZ5URmVtdMKvrDlwfHiOS923D83Ckzh896XAkSy9ihCrowaUEZKqZ1cC2x+n83PIwAzzhmy8KuXairm3O9BiHMaXjEKM9QqhSBrF4xzQoBvOwFftQ9qABix+CAen7+jTGedxa2zwO4vnGVbQgg9ifm/mikAvBvn7hLCAVTG9kh7zq0mex+8zTFS4QN8tnbAmRSq0GQN2tObkVWEbFOQsnYPMedatRLl4uyG783C8Erbc0TGueeEcNL+zXvOwy1rLy5pWTuVDA6wyzylbQqK7HPAtinAujdDsz+GC1Ybh3zH9UqJ1SDOMRZEoixfNEAb5GHL4eDFOOerLpitHPINZfM6ABJZexTBl+6skCBeeC7JUmo2jqDASxhhWNgwAfj7Y+D6kZI/dhmHGecMFwghonhK3jPk96Bpo4xzmOzZ2h2TASGO01wEXNoLXDtsz84ZAIUmK/48chX/XszFpRxDQPu4Ldg8GVg3Dji1LuS75mPzlAqgebVkAL54zt3XrOa9i7cKIxhblpMJfJAK/DYicm0oIehJqJ7ySus1KlRwJLm5mhfcvbM34xZM0Hjf0AOejHPA6bW9kR9e45yWtVtsBDYZGT2dl4MmRfCch0HWbopkzLnYOI8LdbyyxSD+lxFy+MVQhcKevDCO95yXUVk7nU8ibHlwbJ5l7XRIwY2CMCami3J4JVC0ZT/nnwW8counJGPOh/14APd+uBk38ku4f7AxOWIw45zhAj0R1aqUXrNSbj15A19sP+sqyeKcD51YmKBWymRrp1eSFYF1RzqZVUFZTk5kyBH/G8pdU5m+G6UlAABOXCvwqFQweahZXdkRt3q9pB82NLu/sP+bvjhybSgh6HwBSqXYu105KTTX4vClPJiJ2vmGzf970VOdc8AZdx7upHDZReL9y3nP5eqcA+GVtRdIs7VvmAB880jAahmLjfN50VXqOY/Xh0bW/lv6ZWw5cd2ptOLK8BgeZnhFSFKMBmqVErFaPuacydojJWunQwrCXoki7zJwakNUqsWi1nPuTdZeAu3dfe4WDBYbTl73nOcn5PD9lZUALHGYcR6tXDsC7F8QkUGUTsghkrW7GYTeWfUfpq09gbM3C8UfUJMsvcIsqnMua5wHmOmZrtEcEelPtMArFWyhPwfOTN8q1CwfhxiNCiYr5zGRmNM4d40f5g3Cq+EsYeOV6JughAsjdf2kVE6MARD8tThyOU/sObf4nwGeN4LlsrUDlOc8zJNY2nMOOPMn0AgJ4Vw85w5Ze1F4PedFJiuw8xO78uj4H37vixCC7p/+g85z/vIpHMg15jz4hHC3Ck0YtTwdLy85BMIv5oTQOL+YXYwZ606UaY8kTTYVbw4AcbqyLmt39vuwLVDQCeFkSqmVqOd8dmNgyTPAiT/De5wA4M9DtCWEu1lgv37uZO3h9pwXGC2C06nEwx74/spKAJY4zDiPVv4cA6weBVzaX+KHpo1wu6zd8yDEx1a6JCiSyNrVVLZ2QU5Gr8gpAjTOs51GQH5Z9pzzK/ThMM6pTN8qpQINKtu958c8ZGz3VLM6zWGcXwtSSh0UUeg9CBf8JJROBsfjvBaBTwxNVhuOX82HFdT+A5DCCXkKNG6M8xIqp5YjNc5lxj6LUPZNkhDOERd/4npByKXndHy3SIocgGejyGzDiWsFOHezyKfFDhfPuS74mPObhSYQYm+L1cp7zkM3EVywIxOfbTuLn/cHFjJ1u8GHWvALSHFlPCEcnYsjbBnr/fCcl0SZSADA2S0lcxw/4MdKS5QlWONVVOXjImOc04vm3srXhhzmOY8YzDiPVoy5jn/zSvzQVpFxrvAo3yGEoNjiTGgiwkXWTmdrl/GSyDy4fOG29JzfOgtsmwYYcn3/Dr9CH4YaobSsHQAapSUC8Bx37kminJYcGm9tcJQh41xy/Wiqx1kxXPUrzDfOBLz/U9cKYbERqOhzGoDn3ExViZBDMM7DnBAu28U4dy9rly4+taieDIUC+PdiLh6ZtR0bj10PWbsKKQPCSHvzla7X1Ru0tzTLh/PpKmsP3nNOV2swWxznnNhCtnDGT6xLfFIbpQhl1HjPufR5XMagpewlImv3FnMe5lwaznZE1/1gtXHCeCaaZ6qCq/4RCvjxLUEvzqfiLdwzVFzOdS5y54c7ge6NE8CsJsCBhfbXHPOcRwpmnEcrvKEaoMEaDEKmdqUCCoVCqOMrNwgZLZwwj3KJFaI95wqTvc65NFs7F5z3B7CXgeK5bWLOv2wPbJsKrBnr+3fC6DmXyqIbO+LOPRnngkTZk+c8kjHnZclzbnGtcc7TsmAr3tD8hDZXFwW8/8OXcwEASlBjQECydi8J4RL4hHDh7TcusnaZsc9slU8Id2fVJCx67l5ULxeDq3lGvPj9fgz74UBQygQet/LjMBvnFhuHbIdh5+I5D8Y4pyabZrNnIyYQhCRTUeaNixR0jXMAQsx5Wa1VTyeEi3S2dqAEYs55oswTWkxdh2gzznllA68y4SmpmPMrtHEe7kXG1aPsiZn/eNX+WvCcs/GzpGHGebQSwRUraaIjfhCSS9RBr/q6DFLU6qweZjeydtpzzoxzAbNDLp75t+/f4ScBYVgV52XRvHHXuIp3z7nHmPNEZ8x55Gq7ej+u0WLDr4cuu3hSSxu8V0gu5ry82m40KkyBJ5s5csmu8FGJjPMAZO1ejPPUBHu/KWnPuVEmk7O7hHAA8ED9itgwqj2Gta8DlVKBdUev4ZFZ25GZ5T5Hgy+IJeRU/w0gmSYtp83yUjUhu8gMQgCVUoEUwwVg2f+QknfU3iaTNeB7mDbOrSLjXDyOZ2QV4eu/zsleB08Umuz7Z8a5HT5cgy9JyC+wsIRwYgMxp
HiVtUcgW3sYFvCDgT4HoqFEFVz1j1DAhw/FSZ6dJSVrFxnnYYg55ziCtUeu2hePpf1CMM5vk3l1KYIZ59EKb6hGYIXTWUbNHkvpSb5DP9RdYoU8ytrlPOf+D3Imqw1XqLjlkMjai24B+771T1IeLvx5iAoJ4cIva29Q2W6cX883uTVcPcnaKzmMc7OVC0vJKZ/wwaBYceASRi1Px8cbTpZAg8KH0YOsnc9zY7MEbvAedhjnGiV1D5v9N0RNvsrawyj/NFpswvjEl5mT95w7xkk3CwkxWhXe7NoQq19uh7qp8ShwlHwMBtrDqQadryMAz7nZd885L2kvH6eFaskzwInVKLf0UQD26h5GmYR5vkAb5xYLNQ5IFmpnbjiJD9ccx+/pV/zaP78AURK1iEsD2Y4khcmOmPPY2zgh3LEr+Ri17JComosUWspujJCsXWycl5DnPAxzhGCgz4FIfaXSyWxdshS785yXUJ3zK7nhjTn/50wWXlp8EO/+9p/rIq+g4C2bi3eRhBnn0UoEbwqrQ9bODz4awXPuaszQEzyXz6lSSlJZu/BQpB9WAUj4L2YbRDZWSDznu+cDf77mjLuJJP54wcOZEE5i3MXr1KhZPhaAe++5J1m7Vq0USpMEW187cLwb5xdz7BO79Au5YW5LePEka09wOCcUnDWgxS2jxYZTjhIvcRoqOZrFAJzb7ldSS2FBR6adgLOUWoHJGrYYUX6xSaNSCLVt5bK1u0sIJ6VRWiI6N6kEIPiET7RxrgE11gUga6dVT1kFnifronjznAwA9v7C5/AsMAU25uS7M84lnhpejn3imn/qDv58lUrPuc0a8tAbPnlrOSHm/Patc/79rkz8mn4FKw+5TwZoKAlZu9Wz55w+bjgXHUVEmSeUVvHEgDoHEZa1m62c4Kzi7xUerco+5oZb1h7umHN+/nUlz+CalJmLnJOwrMOM82iF9yJH4KbgJ51qpRIouI7EzHVQwebVc262SdrqImt31lU1hEjWfiFb7J0LdJIoojjb/q8hO/h9BYs/taJtjodaGFbFjTKy6EaVPUvbPdU5B0KTJTwofJj48jLQ0zcKSucE34EnWbtWYf9MDWtAtc6PX82HlSMoH6eFjt597gXg+x7ANw/7vC+zzbPnPEGnFvpTuDIb0+Wm9BreO+I6NpndJISTg5fjB1tLnvcwKRWAJkjPeaFI1u6b55xXLvDEa4PL9i3ynFvdexh5z7xLuU4vlFrj3GoC5rUElvQO6W75vAHJQim12zdbO59s1FM1AVHMuaUksrXLeM6phZFwLjqKiDrjnHLkgDpfKrXM1iUHvYAZq4u8rD0vDMY5f+/nG6wynnOWEC5SMOM8WhFuipKfVAiydrUC2PA2yq0ejAeUh2G2cS6xhcXUQ91ilXrOxaXUNEoFYrT2LheqhHCZWXbPpuDBCYXnPIz1wv3GL895+Ouc055XPmP7MTfGubf44YjXOvfh3uJloBYbwZkb/hkG0YRT+SBzLRz9RaOwBXQtjly2S9qbVksSx5xnn3X+38eQFW99RqFQCN7zm4Xh6Te3qLhc3oPvqZSaXMy5lEqJoanPzk+kKsTrxJ7zAGLOi6kJ8a0iL8a5w3hPlRrn+uDKqYlizj14znkj6lyWn8a5o13C4kp2BrDsf8DFvQG0tgTJuwTkZPqXc8QHpDHnt3O2dv5e8xRLTnvOI5atXbIwUiLl1KJhbkNBK4JiFNS4HmGPLd8urVrpMs6XhHFu44hoQTccpYL5hZECo8V1kZclhIsYzDiPVrjIGee8rF2jUgKF9jJAFRT2CbhUuu45IZwk5lylRAzvOReMc/dxhr5wwVHjvE7FeAAhGrzCKA/3G79izktO1g4AjYSM7fJSU08J4YAo8Jz7IGvnZaCA+0WI0oCnmHP+PtXAGpBxzsebN6sqMc7pBHM+empk8xTcOA589SBwaj0AoGI8n7E9PJNY3oApH68V2iGfEE4+W7scFUPgOTdZbcIYWylRLzbOAygLSE+I/ZK1U/Ce10AVS7ki49x9Qjh+/LmUY/A5KZyNI8L3hGfTyheBE6uBbx8JqL0lBv/8D/FYzuf3KBfHx5zfvp7zm47kap6MbvqzyGVrd6phgBJKChdlpdTocy/ynEfYOOfbFa9z9eCXhHGeVWgSzbnDIWvny3MWGK0gbmXtt9/iXbTDjPNoRViximC2dqVSkFXzEkqpAS5KCOexlJojW7vGGadjtXHi3xeI59xR47xp1SQAIUoIJxi50ZA0xcdJN8c5B9BwJIQTZNHOIYP3nJ9xI/l2xg9Hq+fc+7nNpo3zK6XXOOevn15G1s7fp2rYAloo4TO1N62WLE7mE4hxLicVP7UeuHIIOPIzgPDXOnd6znXCwpKnhHDuJPg0tOc80MzmtAGVmqCDWhHc2EmP3d5k7bzBwC+M8MQHadyJPOdW2nMu3h8f80+Ic8z3Br34IIxPuRcCameJw98vIZwUW22ccL55WXs8nxDuNvOcW2ycUIHAU7I7OpFhiXjOJcY5IUS4D6umxAAooaRw/oTLuePKISDzn+D3A4nnnI45j7BRyLcrVua5qSuBUmp8vLnasXITjoRw/P1h5Qg42iQkBML8k8naS5xSZZz/9ddf6N69O6pUqQKFQoFff/010k0KHxGM9RDJ2h0PFhVvnFv9MM6pgbWl8hRSizNEMa8Gi00Sc+7/IMdnYm3iKO1128nafYVeCQ9HzLmM57VaSgwSdGpYbATnZSbMQkI4N8aLs9Z59CaEyymiPed54WxMWJFTPghwzgU4f+vOF5utOH3DboQ3q5YUtHFucrRT5DnnS7I5/hXKqYUt5ty+33KxGmFhyeRnKTUp/IKC2coFHDfIS7RjNCokxmigpT3nATwnaIMsu9hsXyx1Q++LH+JTzScuxnkCL2sP0HPur6wdAM7e8M04L5IzzqMg+7NPCNeThGyBnj7XyTEOz7lDySaVVpd26MUmg4+y9rBJ+z0khDNZOdg4+3OoVvk4AMCNIPNS+EQoPOdfPQgs7AYUXA96V/S9GqvwwzjP+As4vyvo47uDvy8i5Tnn4815ZajRwsnmPwkG+t4XRabSfZWzYfq6Exi0YK/H5wQjdJQq47yoqAjNmzfH/PnzI92U8BPJhHBWatLpGMT5pFFSA1wka/dQSi1NkY3HDr0AnZII8eEGs8Q49/O3Wm2ckE07LJ7zkpJ+cTbg6K9AfhBllqzhXW2WizlXKBSo4DA65Mqh8d5GvTvPeaLdUxA5z7nnj20cEU1oj13Jj2BN9uDwbJwH7jk/diUfHLF7cisl6kXGOQmV59xqFP3LG7rhkrVnizzn7svl+JMQTqdWIcVRvipQzxjvxYnXqxGrVYlLqQWwsCmtLey2pKEhB+2Nm9FdtRtpGnHMNz9pDTTmnJZp2mzuPee0EXVOLincjePAjROit2hvnHD9oqBusk+InouhGc/5jPeJejXUjgUlPgO12caVvqR5HrhOjQ2e5OolLmuX9Gv6mHz1k5LxnAc5t6FjkAuCKw8JAEXUedDDfXiLCIsBWPwMsPjp0CgBZODHEGkZNaBk
jfN6leJDm1eJgh4nRT+FfqYQGxbtzMS2kzdxLsv/EqkM/ylVxnnXrl3xwQcf4Mknn4x0U8KPUEotAjHnHBVL6RjEdUr7e54852aXUmriB4DekguFIcdZTi1Iz/nVPCMsNgKtWol6lezxz0YL5+rB9xfeKC8pWfuBBcDPA4H59wa+D1t4Pefusn0nOjxncosiTtmv95jzyBi9no+Zb7DAcStArVQg32gVlTUpTfCeRzl5njN0xf+YcyHevJp9cUxBeXBtBkpp4MPCG8cRWGwEfVRbEHdxu/MD3ji32P9NDbOsXTDO47XCYpRcHW+n59xzKTWeYDO2C8a5To14nVoccx6IrF3iLXUrbbc4+3z5hBjRR86Yc/8njISIF79sVnnPOSFE7DmXGucWI/DZfcBnrURtlZW1q0uJ55y+niFScPHJLflkcIA4A/XtlBSO9j67M7oJIaJFH19zGfiNh2zt/AKZXqNEWlIMFOCQenkDkHsxPG3hCXbBh3ZcSOOUA6DIrazdwzUxFdifDeZCwBIeg5G/J+Sem7wi0BRGTzJf47xaSqywEBrquHNaQWUl1LWkzr3VahXuoxKpJsAoXca5v5hMJuTn54v+Sg0RlLWLJp2CcS4fcy6ucy71nMsMIkU3BQOv2GyT1Dn377eed0jaa5SLFYxEwLGy+M9sYEE3wFzs1z4BlLys/fQm+7+mIPonPQEIh3HuxvOaoLd7ouRioUw+xpwXm21hyULqFS8LAny8eYJeLSz+lNa4cyHm3JPnXGHDNT9rzguZ2qsmAwAUtEfFkEsdw/v1Nds4NFFkYJrmGySuoEpICZ5ze9sEz3mYEifxxnn5OC3lOZeRtVt9TwgHOGu0B+rxLxK8OCrEatUhlbUD7o3z4mLnxLd8vNjz7Iw59//+NVhsomRHnFXeiLHYiLBIBgBnb0om4oYc5/9zMoX/FpVqzzmdTyC0nvMUyjjXqJSCB7DoNpp0095nd4sOUjVMiSWEMxcBW6cC1/4TjhmnVaNigg6tlCcw6OIEYM2Y8LSFJ9g+Rc+NAqgUISUgWbuVGv8t4Vk059uVpCUu84WS8JzzzoCqyXokCnOt0M6V6HwhVo4yzilnmdHs7MNhu08YIm5r43zq1KlISkoS/qpXrx7pJvmOkCUxAqXUaFm748GiU3Kiz3joVTQL/RkhwsC6M5aqc+xinAfu/eETA9UsF+uooW7fb4HRAuz/Djj/D3A13a99Aij5bO0hWHkWG+ehb7e7bN+JMbznXPzAsNqcsXTu6pzrNU6pryCn9qG/c1yovOye90OXHWrspWxctCMsrnhICKeBFTnFFr88SIcv5QJwes5po0ppoj3nvhnnlRXZrh9YpJ7z8Mac36LqnHtMCOeHrB2gPOcBLirQnvM4nUTWHsBzQprETWScEwJknQEIQW5ervB2nEp8zyQEUUpNGntvs8pLuaUxw+duFoqVNnT4xK2zwn/pNgmLymq93+2MCOGQtVP9moYvpxbIAkvI2TQJ+G2ET8k6RRTeBK79J7z0xXMu9QCWmKx96xRg+zTgi7bCAlmsToXUBB0qwDFmFt4IT1uENgU5RxA5AELgOQ9E1k4v5oXJOC802ZCIInyU0RNY/qzoM6dx7r7fBBufXe7mXnyvmYo6qmtIcuSJCHWtc/q+FwnEqGe50eQ812FTmDBE3NbG+fjx45GXlyf8XbwYZqlQKImg51wka3cMjlqFvHFOT/BEnnNqUF2YPAy7uUaOL9xErMY+oTO6yNr9+618GbWajkQqCXrKUBSSSAXiOS/hbO0hWHkWy9rDWOdcYtwl6ByruZIHBq2wiDnyI3Bkhex+KyfxcecG4OwWYHpN4L9f3Lbju38y0HzyBiFDeFB4mQDyMbjJsVo0diQcLLWec8dTV1hc4Tjn5IZKCAf4LrsuNFmF+LM7HTkf6AU2JSdRcxTe9Lg/k4UTZ4vlcXjMpZ7zrEKzsAAUSrJlSqnJe859TwgHUBnbA/Sci41zNTSK4JJpFkkkm7cKqeu150tgXkvgwALk5TnvNQV9HKVa8JwHImuXTjI5m7ysnU/Gp1AAKqUCRWabKKZYpDjKpoxzk+s+oBIbplEL/SwMkXHOK4FcjHN36ger2V4TfvfnITm+VwixK94O/Qhc/de/785qCHzRFrh+DIDYc+5Ohitd9PGUOC4opAnhLu4VXvKhJXFaNVIT9M4Ft3Dnuwk65jzU3tsAsrWXgOe82GxFF9VexHCF9hKMFIJx7sYA/+6fDNw5aT0OnM+R/dwX7i9YiwdUR1D35mbBERISWbvVBPw2Eji+2oNx7nxhMjuPyTznJcNtbZzrdDokJiaK/koNEYw5F8vaxZ5zjwnh6JhzenJlUyKLOCbvRVmCgefqOffvt2Y6DAM+kYpIYi3xtvkF/+AqTbUdbdQDLZwx5z56zvnSR1VxE7q1o4Ffnpc1hkW1zn940j7RXjHYbTv+On0T1c1ncOTwgcB/jICPnvNYjeA5P36thI3z4uyQqGeM0uu38FHgkxb2+8MmTvroa9z50ct5IASokqR31r92t8D200Dg47oeS++Ybe6Mc5Po3/LxWigU9oR9OcWh7etWG4dcoRa0lsrWHgrPeXBy/CKJce6TrN1cBFzYLduH+P3VKGcfP0Ux/FknHf+eRkE+tRBGH0epFgy7gDznxVLjXF5FZaQWlvi2ipLC0cb5LXnjXJhAlxbjnH72hGixle/XvFqJh08K5zLpPrrKbpCsezMkx/cKPdfxN8kYf77ObQPgKmuXy2kiNcbDFnMvlbVTr+kFstREnbDgRqxhNs5DKWsPgQOJvldjaeMcxP3zj06C64txTgjw30pR6Isv7VJB/vh8zLk7WfvOs7dgtHDYciKwbPYGsw06m32Om6gyUbL2EPSNvV8Bh34Alv9PpFoQ/RR6DKZk7WFbxGKIuK2N81ILIRGtcy6WtYsn7h4TwtGfUYO3iVMiizgWRopuCgnhis3WoGLOnZ5z3jinDEXe2xbIimqp9JyHW9Zuv7bSxCjuYs75yXCaKtf5psyEwN9a58SQizW6t9Bv75P+Sx9dduZbzHkKJWu/mG0IuazMLVcPAzNqA0t6Bb0rkfLBZgEu7ALyLwHXDgteGo3Cvef8YnYxZqw7gT8PXxUmUkK8uSBpJ+4XE68fsf+74xO3bTRbOXCQSUgjKaWmUSlRzuH9C7W0PddxbRUKe7kpvUPWbpSZgPmdEC7R3tcD9ZzzC2BxOjXipNna3T0nlvUDvutsTzopgVc98eNnVoGMTNRchKIiyvjlxMY5P+YGUic7t9gsSmonNs5dZe16jQp1KtpVUqKkcEZ541xaSo0QIk4IZ3U/vucVWzB/6xlcyglAeRUKaIMkVJ7zIud4RsMnhSuUes4DUZ0FA/3cKsoKcB/2e4sewzgiH5bCLzjz96/RwoUwZIpuk0TWTr3mFwTidGqUi9VC5zDObR76ZkgI1jMv+k3B9096YSRGIfnt7vZPOyQsxYDRi5ruyM/AiueAuc19b5fJ5tY413mJOed/0+nrMtUlfOBKngHxsI/DOlsxEmN4lWIIxoO8y8J
/xZ5zOubceS/QnnPDbZQ4MpopVcZ5YWEh0tPTkZ6eDgDIyMhAeno6Lly4ENmGhRpJCYOShk/SQxvnGoesXZqZsthdQjhqQDVyKtwSjPMbgoHnImv3YyGCECIkhHPK2u2DV2Gx0bnfgGTtJZytXc44V7qW7vCIm2ztRSYrvth+VrYOuT+4SwjHJ+KTJinhPY1JKs8P8bRE3jj3bRFFRT+ADe7lYkaLzYcM8L56zrVIitWgarJdgn+ipOLO931t//fMxqB3Jbp+xVRctyZW6DtqYr8+cgslMzecxGfbzmLEkoP4v/c2YsB3e7HqkP0B36xasn0jX1Q+HiZyLsY536cFz7mzXc6kcKE1znkDJilGA7VK6bbOuY1zJinT+ilrDzTmXOo5F2Vrd3fuHZ5EHFzk8hE/dvPjpyjm3OwYLyzFKC6k+rtI1q4KqpRag11jsUc3HMmwx4yLvIUiz7mz797hqPcrSgrnRtZOS+054gjXohPCecjw/POBi/ho/Ul8uf2cX7/JZ2xWe/nMgmvyn1P3yX+XAjRUJeQWO3No0PDX0MVzXNLJ8+ixoTjA3+xQyknHBTkpLt+vaJl/WLyCnjznJmcVDaVSgWSdffzjwmGc08/DUMraQ+BAKqLqieshGdPdPTNoWftvw4FpNTzXPM/Y7v4zNxSaPXjO5YzzvMv2BWhDruCRPn0jQOM814AEhX3+qjAXhdZzTo3jVmpByuwm5txssSAZBfhOMwPPbGkPrH4t+DYwPFKqjPP9+/ejRYsWaNGiBQDgtddeQ4sWLfDuu+9GuGUhhp4ARcBzbhU8Qs4653y2dosbz3l55MFKT65oz7lNgVvwQdbuh4T/ZoEJBosNKqVCMJp4L05xMTUYWoOQtdOenO0zgO+6hCe2SS4hnNLPiZFoJdt57v/49wqmrT2B2RtPBdg4O+6yfQuec5eYc/v2iSr5OFIefz3nhbS8uFBeLnYxuxgt3tuIN3854nlnXvqbNLtxo5JOChfCZJAiWTs98eUswnVRwQaAyNY63++Im0tN0MFs4/DXqZs46oi/byoTb+4Wr8Y59Uji+zGtgnFMMHnjPNSecz7umjdg3CWEoydkPmdrT3B6zgMpHSiKOdeqhRwBALwv4mrjXd5y8ZwXSjxRAGAuhtGD5zyYUmoVsvahnKIQ9RWX7Lt2kwSNN6J0GqW855xOCFdwFTAVOn6fuE32a0aNtWb3xjlv3GWHOGxC4Mwme/nM9W/Jf079/r8DlMVKETznEll7rJAQTtKH/H0GBQt9/QP1nFuNsNo4l8oDcpJ1Q7DGOSFAdob3cVpaSk2yeA44QwtSHMIOLhyOASLvPAmIEHvO+bEtOVaDGPjoOacXMHIdDrptU90fJIAxt9iDrJ0f90Ux5wu7ARsnAL+/LHiYz98qCiiJ2pVcp+cc5sLQxpy7cUyIjHNqrLdYLGirPIqHVOnQW3KBoytD0AaGJ0qVcf7ggw+CEOLyt3Dhwkg3LbSIpN4RjDlXQhiEec+5NPlFsdmGeopL2Ksbjv9d/9j5AT+xVmpg5YgbWXvgCeEyHV7zKsl6YQWT9+IaqdI/IUsIt/VDuxT48E/+788rMsY5HRtp8+HhR8dfUQsj/CSTr5cZCHQ9WJc65w6plTTmnJfBxys9y+3THAnh5AxCOWh5lTvjfP/5bBgsNhy84CURizdZexEfo2m/FiWeFC6EqhnR9aMnvlaT6B7UwOaiYrhRYMSlHAMUCmDz6+2x+fX2GN+1Ie6plYL29Svi3trl7Bv6MknzYMCbrDYQT55zEOGeDFc5NbqMGgC3CeHocdBX45xvs8nKBSRN5CewcY5s7X7VOdfGiV6arZzwG2qWs38mSghncU4Ki2jPOX2NqYRwgXjOlTb7tUvWOs6lTd5zLsja1SrUcXjOz9Gec6PkfszJBAy5eOLcJLRXOhOLma2cuP0eymzyE+Cw1fTlxy43WbktVOZ6oyk0fTynWDye8fCGoUtCOBWl3go2hMgX6D5c5Dl5pPh7dDidGbeKzCAEUCqccwK568i/F6tTQe9QyPh1vf9dCnxyF7Dlfc/buXjOnf28iGoDACTxlyYclWLo8xt0tnbPi+7+UuzoeymxWsQqJP3dF885j6ecEoEkzTTZoIS8s4yfd1psxBkOkZNh//f0BmGxiyOS8cpHLucaBc85RJ7zEMjK3dzPdGlLut+aLRao6edNSVUyKsOUKuO8zEAia5zzid20VNkc3jiXSwh3h+IKVAqCSubzzg/4AVWpdhjnvOf8pljW7mZC5g1epl2rvHPSyXtxjQbKqxJUQjiZASiUWVSN+e5lWPTEyOaDd9CNrJ2fZGYVBe5hpL2GrnXOeVm7RfY7SUo686rr9a1MJ4TzAToxiTHniuw2F7PthkWwEsUcQQZq71d83PmpK7eC2q/PhOjeN1s5Qbqml3rOrSZR31HDimuSmOhDF3IBAPVTE5Cg16BOxXgMbV8HPw9rg0WD73WqKXxZTPDiORfBt4tWqzj+71M5tQAMiuxiqeecN87FbbOIjHPfYs71GpVQDieQRQVB1q73MVs7PSZIPOe0J5FPsnariPLoU7J2QxHlmaYNDTrmPADPuYqzX7tKMcTxE9x5zh0J4bROWfvlXIPTkDJJjHNLMbDlA9ydvwmLtNOFt802TjwGeZC18+NZ2JOEWeX7b8YNZ/gOXcYoGHLcyNqFbO1Sw5QOrSqJyTjnfeFVFvr5aDUKOR0qxOuEOYGcrJ0O9Yl1lxTPE2vesP/7zyzne4d+BBY+Jg65oj280phziec8SWe/FxThON8iz3koZe3B3SMcR1DBchnJKEByrAY6SNrmbl4od+/QOSWkBFjRQuQ5p64LnQjUJWM7ZxPNP07fKIC/uHrO5VWKAeHmXIi6v8Q4F52H0pQsuZTCjPNohB6MIihr1yucN6MWbhLCmWxQO25aBT055729Kg0sNg63wHvOpbJ27wsRN/KNLola+HhzfmIJAAmOSYbZQHlEgvKcywyC/saCe2LRY8CCLvISIfo4biZwIkQJ4Zz/55OXibxifkJ7E6SydmE1V+IJ5PtJopI6/zITAt4490UWa+MIrJRHqTDrsux2Fx2JAmWlZGe3AhsmOK4t1adkDDl+Mpvs8DQ1qZKIuopLWJrdB7aN73ltb9CE6N43XT+Jl1UrUR559sWVImpxQcZzfk3iOeeN8/+rmRx8ez081E02TuKl4D3nlCHr+L/XmPOzW4GP6wHHV8t/7oZsiayd7+9Gi7xxrlUpoZALS3GDEHceQFI4aSk1UUI4ubGTVkhIJq28IaZVK5HqaJPFRpzJDh3jJmcqgs3kRolEec6LzDb/ytoRAo3DOK/IG+c2eZm+UUgIp0S5OK0gyz6X5ViElRrnnA3IPQ8pJgsnHoM8eM7zwu05F54x8v3gxJVc4f9mc/ChG1YbJ/ymZInnnPfaFkvHYFrW7ssCcbDQY0OBP8Y59WyzmoRkcJUS9YLSSy5hIZ3LIIZOUusrOtdQEfw2Asj8G9g2Tb597jznDuM8wXHKFSQMxk8onT7SJHf+cuMEsP0jwFwEQ9Z5bNe9hn
T9UKTEasV1zgHfEsLxeMqTEMBibZFU1k79bjrXiItxTmyiBctT1/03zm/k5AkJAmEuCm2dczfXXxTZQs07LRYrVAr5RQpGeGDGeTQS8YRw9uPzceYAoJbJ1k4IQbHF5ohVBZSi2qy8rF0Nq404E8KZC5GotA84rqXUXH/r36dvotXUzXh1ebooTvO8wwATe84dxrmRmnQFFHMukbXTg3oo4/DkarnyEj36mL7EuYuMc+c55T1AeQaL26yi3uBXgLVqJVRKsSHiLKUm9Zzbv5OkoNou85CN1zm9b94oMluhpAxqgxvP+aUch+dcbmK9+T1g5yfA+R3icyxnnBeJDbVqKTG4R3sBsQoTjOd2+NTmoAjRva/ZPgWva1Zgl24kNIWXxJJRm9RzbsONApPIM3zIER7QonqKl/b60L88/CazlZP3UsgY56neYs6X9rX/zuX/894mimyHwsTVcy5ut8XKJ8303TAHqLjzADznhXxNZJ0asRqVuJSa3AS5iJJMS8aQYsrQ12tUwj0oxOo6DFerqUhcd1hknKsEryvgZ8Z2m0VYiKmgBwACBW04y8Sc85nz75BK26Wyds4q2xfNNsnzRhpzTnk7+cXGsNX0FZIcyi+anrrqbEsoPOd5BoswxCXHqIW4fICStUuvn78LxMEiMs79KKVGn0OLQViwS03QCSo9uWcBv+Cm16o8bucWWo0ivY751LPJQ0I4Z7Z2+/ETNPaLpAxHnXPpGOGrsXr4J+CPV8XhdcHK2j9vDWz9ANjyAazn9whvx+vV0CmknnN3snY54zzUnnOprN3ZFpFxLp1bEU6ktjoVQMb2/FxqEd1cSCXfLSnPufP8Wi0W0dwLxFYyoS6eOPorcHJtZNsQRphxHo14Sgh3agOw79vQH5OK2+Jl7XqF89h88iG6lrnJysHGEcGDIzLO+cFbpYGV41CIGHCOgbMc7JI9g9nqNeb8u38yQIg9sdnqw84HNi9rr1Ge8pw7vLgWd54eX+A4ZzvkjINQes5lj+84Jn0ufFlgcCtrd55fPp5WjgU7MvDZtjOyn7nL1A44z7nJyokMGMFzrqCuhZuHbBVH3Lk3pKvYtnz5TMcXHeWPDHIZ282Oh6QxH2LPufhhZeOIUFaLj9FUKBSonmz/v9EY2lhnWULkOVdf3gvAXg5RsfsLV1k7NRGMUdpAiNPotdo4HL5kv19b1EgOvr0e8ie4Nc7p0BSL2HPu1jj3w5tNc0tYkLHvX+fo89I653zCQ42PNc55UoPynNvPR7xODaVSgViVl4RwhdQijMQ4573wvFFSMd7erixeYeMYN4mpSFx3WOJt1qmVwgKFbNz5sd+Bvz5ynchZne35P+NuHNQNxQOWf5yfyxnnjra6JIUzSbxSnFW2L5qsnLj/0bL23V8A02sBhxYDoGXt4fKcyzxbHFzPN+J6nrNtofCc8/HmCXo1NGtGAx/VsSczAyVrlyaEo8fHkjDO6WtjzPU9+SrtQTUXCgtfqYlO49y7rN39dj4hrZ1Nzzs8lFIrohbcACBebR9n1AiD8SMdI3xNOrfyReDAQuDYr8733CykyWLMAzZNAq79R7XFMZ6e2waDzTlWa1VKV8+5uwVduXmROtQx51Zx+BB1zpRKhTD2eXN8nPEzYzshBIX5uc43TIWhLaUmORd8jhUzrX6iFpysNotrYrxIes9NhcAvzwM/D/ItJ1MphBnn0Yin2t9LngH+fA24uC90x9vyof1hnWOXAgqSTaXzZpTznPMPMl7uoqKlWFRCOHuSCQU4XTIAIMFhsBm8lFK7mmfA9lPOCea7v/0neHZ4Wbuc59xqomNU/TSi6IeOXMyr0tVADSn84E+fC59k7XRst0V4sNOrrNIMtjwGsw3vrz6GGetOCt5i6eeAG+NcpxbsIDopHL9qHA96kiI/iPLSdm9IjXOlTFyi1cYJmd9l69vyD3SrUeI5F28n8jRR2Y2rJ9n7mClESZrA2YATa+RlnKGIObdZoTJQK/CGbNeEcNR1qRxv/33XHNLQk9cLYLDYkKBTC8m43BJkzLnJKiNrJ0QSU8rHnHsxzmWyk/sCH8rgmhBOmq2dKjfpB8F4zulyQwAQq3bff+1foD3nYqOaH7t5r2kFwTg3ibZXWIvFCZro/XA2KBQKp7RdLjRlzRhgywdAtqQkGTUuN8zZinKKQsSBVtnQCeEcHk53nnOprJ3YZM+HyVNCuHXj7P/+NhwAJWsPR2ktwNmnZQykv07dFI1zZnPwnnNRvPmVQ/bx78ZxAECc1o2kmz6HJS1rB3yPO6fPoTFfWPhKTdALcnFPCeFiNCpB/l7sz/Wmk/ndkixs06oMqeec0HMo8SKZ6J62WYDMf4CNE90qLPzCZYHMz2tKx9H74znf8gHwz2zgi7aun5kLYRQZ5woZWbsfMeehSAh3aj3w90xwNg7FZpu4PRKDlPeeezPO/c3YfqvIDL2NMujNRU7jPBSec0m29kqOkrY2OrSIuuctVqv42QyENv+Sv5iL7P3OagxMHVsKYMZ5NCKKDXJTm/JqeuiO99cM+6TdkXWUjznXUSuGfFy52Di3f85PJJSyMedqYX98krMYR6I515hz8eC1Yv8lcARoWTMFjdISkVNswbu//YfcYrMweRLFnDu8uDZ60uVv6TO5kmT0PsKdoI+/xvRx/PWcU6/p+CR3xvmNAqNQs1kunsnoJlM7YF89jte6lvjgjZkEQk/m5R/iaT4a54WSzKlao2u5nat5RlHsq8sD0Up7Bt0bN/xkNkGvFhlgVRM1jt2EaLJ6cBGwrK9d5iclFH0t97xYLmwxAMV0zLlR9JBNS7BfYz5B30FHvPldNZKhVHrxRvviOfdX1i7t+xLPeaHJKh8nKhcP6gOupdTcyNqpmHN/4GPOA6nPznum4x2LkLEqDworQGw8SCTcQgknh5y2fLz992YVmOz7cpx3tc0glrXT+3Eck2+PS94IQpx9TSoht3oZl+U8546M2nWEWue0Cob+rtg4F02gRfeCvKqKECKMZZFICLft1E1RPgGLO+N813xgxfM+3Xc5Qhk1rXN7x7mI5TPuS6+faIE4TCXlRMeTHN9dDXgpdNtM+bhJec59ijnXOmPOjb56zi0GwORM2ufROKfbJxn/iiSLZPQ9TWxme2muHXOAvV/61i5PSPuJv8Y57ZjwJ5Hv1cPuPzMXwUipMfVKq+8x53Lt96Rs9PV5umYMsPk9GK4dAwBxgjqJQSrUOpfGnDtI0KmRHKsBRyTlH71gr3FOjZFWAxK1Ti99IKXZREgWavg5mMgAp86vzSpTUi6SnnN6sTAcZQejAGacRyPuZO30ymXeJbts8dcRwMW9oTmu0f6w4cspaEWydqvjM1fPuSBrpxMUUdnaLbyx5IjX5h9ABg91zjmO4KcDFwEA/2tVAx893QxqpQJrjlzD59vOArBPdGmDkfecc2bxoCaLOymMVB5OiNg4D/eAxA+IfnvOpQ80eztpg9ldUjjaUJBblRVKGcl4zgH5cmr8Ik4coSfz8ufOV8+5tOZoguWWyzZ8MjgeF88Xb+xZPHvOpfHmPGnx9
nNgswZWq1pKbvof9v8Uu/6WkMjapZNGq0nsObeZRX26kuP38eoDId68hpd4c8A58VTpYHX3aPGYrV0mvk9qnDvu53idWphQy3rPdQmu7904YZcte7hu2ZLrrncjaxeMc39l7UKtc/9W++nSZ/xiWAxtnMt6zmlZu/i+KBJiXaWec7NovFOCoJyCko2LFirFhoWLrN1qdF5vyb1PvC2aUn3faBUrd3hZ+7mbRfZEobysnY835ayizO/8tXQtpSY/WS4y24TFSqOFc0lGGhJ4g00yblttHP45nSW6D2w2i3OBm2b9W8B/K4DTG7wejl9sTInVOK+FjQ+TcCPpDqfnvOA6cCVd/J70+WDy0Zih22YqEDznlRL04rKtEujnmjNbu4+LMdISeL56ziVjOp/7gU/Kp6fUinmF1D2bddr+rzEPXuuqu8NF1u6vcU4Zvv7I2jUewtbMRaCHjRiFDXpFEAnhPLXFV+Pc0e+MhXmONnnwnKs9e85jdSrUS7UvJp72I+78Sq4BCRCP2XEwgl8fD9p7LpW1x2uhUSnEceW0cW6T85w7znUkYs/pBS9mnDNKDHfeZNo4v3nSXns7/Ufg20dCc1zHJMcseM6dx+aNIrOMcS7rOafrnPPfcQzuOofn3JOsfde5W7iYbUCCTo2ud6bhzqpJGP5gHQDAl3/ZJZJ8fV4ePnM4kSm9JGLdeLuMP08m27eLkWsVT2yvHAK+fxy4tN/+kPzhSeCbjv4bUu4esEISOn9jzs0urzmOiLxZt9yUU7tBxb9K65UDtPxPfriQK6fGexrFxnmwnnPxAyKOFLqELfDJ4KRtF+DPk9RrJnlYZdOeJoqKsfZzoOSsuOJj+TcXzmwC/pkDEIJ/L3vI4hqKhHD8pI7HapTEnIs955Xj7L+Pz9ie7vCce403B5z3gFIFM9wk5vFknNtkPOfSsBTHa4VC4TnuXJfo+t5nreyyZbnqCLB7TKXlpnSUZ4Q20vhx0N+EcIF6zmnJOO/tjlXSCxlePOdS49wkNqwrxOswSLUOtS6udNm2PCjPtMVVCcPf/4UmR6x35g67ZJz2aEsmtcZiL3V/ac+5Wbw4WL1cLNRKBQwWmz38gpe1x5ZzfNcGGzW+poiMc+o80QorfbLwX6l6KCzSdt6wkIzt/17KQ57BgniNs1+pYXP1atP4oA7LLnLkz4jTOs+t45rEuqtzLnoG+dBfrSZ7kqbibO/bzqwPfNVekNYDcO3Dvspm6b5lzJeNOQ+5rF0quXcJ26DDuaj2SZ4z0vASFaGUbvnUPUJsQO5F4KN69hjwQJAap75cU1EyXDel9bwa57HuP7MahbAVANArzcF5zkNhqDmOZ3LklYlTuUmEB6dx7hI+5yBOq0a9SvaFYpdyapvfAza8I/u9y7lGZxk1B0prsaAODTrunLquCnCI06mRoNe4zUyvJMTVOLdZgIM/2HN1XNgDnyAkNDHizHPOiAgiI5f6P/3Qu3HUv4ymvuAwznljWkMZ52qH51wka3c8zPl4dBXtOXfcgESlFrwQfJmLGKV3z/nyfXaveY+7qggPzpEP1UPDyk6PWM3y4kGfnyQq6RtXbuKS8Zc94cz1/1w/czFyLeJ9HP4JOLcNOLwcOLUOOLsFuLRPnJ3VF9x4bZw11v01zl1l7YVmq+jZmuXWc+7cvzTrOkAlzpGRtQPORRE5z3ksR/1ONwsYlX1NCCetOQqAFIqlj3wyOB5Xzzk1KfbgOc91JFCSes75+0CjsOLH3eft3p+9X/vnzfixJ7BpIsiZzSj29JwKhec86xQA4DRX1f7aXCQeR6xm0cOyYpxD1p5vQk6RGeey7BPEu6olez8Wfw4VKliV7oxzD7J2i0zZGqnyhXrtsZwa7TmXjgGXDrhubypE0YVDgke8XJwWuHkS+nznpJtemOTVRYHGnF/PN/qlvOCNM71GCbXjmHpRQjgvMedmqXEu9tjVVN3AJM33ePrSNJexqbyCMrLlZO20LPrIz8DCR4FtU8SJ2iTjU1GRFy+STJ1zXtauUSmFsf/s9VynIRSTInyXo0ou8iU27Qnh3MjaY5zKEGkd4bAkhePbwauzHGw/eR0NFRdQr4Jz3FHDJrtoKuBDHpTcYmqxUaJmELK1SxPCiUKrfDDktk4Bfh5oH988Qff7y9S9KDXCfFWpUW0jpnzcKuQrOugFyb7nhHBK79nabVZgxWB7KAHgapxLn+cizzl17iT3aZEkWzs9/7iVT90/hNgdMjYTcM2DTNwTLrJ2H+YV9HX31Tj/eyZwYJHzNe05lzHM6JwKMbBA71Ln3J1xLtN+T31GVAXJw9jr2Aef9DXOTUI4wHvMeaxOhfoOz7koY7u5yH6edn7qqsKAo8a5QvLcMhcJ1XFC6TlXg0Oc1l41R+FG1q5W2Fxl7ZwF+H2kfS798yDfjrvyRWBWI8CQG3DTpW0rkZCbCMCM82hENIhQ/6c957kXRKv9IVmNcnggnLJ25z5VQrZ2V895nFq8DQDhwU+o0mMKx+Cudxdz7vh/brEZ647aja4+99QQPtaqlfjo6eZCOS+pcc7HPopWXuWMc2FiJDPAuRi5EgOB99JYTcDuz8Tb+YNb49wx6PjrOZdOnmxm5BWLf4v7mHNa1u4hNs+NrF3wnMvEnOs5epIi/0DxJ+ZcVGsTQG6OOO5cKmsXxWbZrM7zail2f58ByBZqnEtK5znuMw2s+HzbWVxfNtIeo3bJ/wSNluwLsHkagkPhOXfILY8Tx31UcBWiWHubuM45rwy4lmdA+qVcAMAdFeIE76NHBM+50oNx7macOrMJL+7sgB6qXdS2Ftd+Tb32mBROTfUp6URArg1fP4T4BR3wkPIQ4rQq6IkJmH8v4r66TwjpoaXt/GTMb+Pc4Tk3WjjXGG0P0DXOeWgJrPds7VJZu9hjV1mR6/xQkv083kNCOACIdyzOFRqtwLUj9s/yLmHvyUzntpLxsbjYm3HuKmunw2r4pHCXrlETW8E4t4GjrrFOw6sfJIvBtAFFGeeF+VQsMcJU69wq7/mp8O9nWKd7E09mfSW8J2ucu/NoukEUrsGfW8dxecPQJS6bXnD0RQJ95Gf7v1cOet6OTuBHnXdX49zHZyrVNgUIdJwRCgVQIV5Lydq9xJx7y9Z+dCXw3y/2UALAGQ/Pe4Wl8y93snbJeFbsWBDh1Qv0frLzqXuNcB6TCPqEdIzwSQ1B3fsKaqxzJ2u/ccLuEf7jFed7aupZYMx1OQRtnOsUFmcpNa1jgdVtQjiZ8+DpN4nCRT2MvY7fZjbbf3uM0v1ChNaRpNJdzHmshvKc07XO6XbK/D67rF0ydzUVCI6QoGudU+dCBRvidGok6jVQSecGDpTgxJ8BkhBQH5VgR362LxqfWB1Iq6nj0bL2EkhWGQGYcR6NcG4mXQaJXIxevc27EPxxHTJEfqDRUoaQytEOeoVQWPXViLcBINy4hJ44OBLC6XjPuVTW7vj+r4cuw2zl0CgtEXdWFctTm1ZLwtuPNkKVJD06Naks+kxjzEaMRlKKQy7mXFrHXPSZzKqtyMB3DFA5GUDm39RxvBjQxnxg
61T7wwtwH0/He1L89VrIePylq6tuY859lLX7E3Nustqggg16zntCOLcx5znngevHhJfFJte4p1vZOaLXFyWydtFkS6SoMEI2IVzhDWDXZzDk2Q2bchJZO//g5uvRFuc6jAMqZtxi4/D9rkw8PHOb3bvuBhOngA0y5/Tor8D8VsD1o26/6zOOVfkM4rhXpB4fSSm1CrH2ha+reUYcOu9HvDngHKsUKqFsogvuJkU/9oTOVoRHVJQnzSa99yB67fScy9x7nsZNuTZknQQAPK7aaV+IMDoNtFiFvd/QSeECjTnXa1RCvVp/4s5ljXNK2SQ7gaWNbEux6LlSLCSEs++vvEac8dottAee8J5zlbONvILIasLBU9QzSXLODX7I2uXGHz4p3NXrjv6sjnEuyBAbbNRk2W1CONqAorI8G3PFKqhiSxiSwskYbNlFZgwoWuSyqVohI2unn1M+GOc5cp5zm7gPFJslpSdFzyAfDEJfy4zSid4UbpKMAd7l0m6+l4BilI/TQa1SejS6RTHnGvcedgDArbPi17y3M6mao62StgvlWK3i8yhZlOfnWnGCce48z7mFEpWKUGkkQOM8kIRw9DamfLtKrChL4jmnfh+t1pEsAgGQza1io+ZCsXSGcj6pp1+ecw/nxhfjnBCnrN1RkSWWNs6lnnNfYs4r2X/Hhexip7OAPq8yqieXhHAAYC5CklBOLdjcR877XA0b4nUqJOjVkoRwzt+qAgepn0J0Dn1RudDbeAp18AU3i5u3E8w4j0ZEsnY3nnPAbiDySGOeAsExmeMnnhpKXqSSSQjHT5rieeNcWgYJAFE4H9gKh6xd507WztknB8v3XwIA9L67GhQy9YoHt6uNneMfRv1KlHT17FbgozqYrv4KOjqBh5znXJD1yQzQMrHbsll9paWvvBnnmyYB26fZ414B19q8wvEsgcnPZGTtmuO/oqnC2S88ZWvnkZO1F/vqOae+a7ZyLjFT7mIIE3RqoZyPiIXdgK87CMaCtJQaAOTm5ope855zfkJuMNvs5+bbTsAfrzo3tBoksnbH/3fNA9aPR4NLvwCAq8fYcZ51ChuGtr9DWOU/cPYyCCHYcPQaOs/+C+/+dhRnbxbh93/dhzuYbQp5z/nPA4GbJ+STxPmLo48XEYfhIu3fFoNoclBeb2/P9XwjDgjJ4JJ9PJYz5pyo3Sy4+DrhBtx4zp191aPnnPZkScdND20gcJRRo8adWEf8r5HynAearR1wlq254Uet80KJMQ0AWqV8SJCzkdIM6c5zJyysOu67ZBUlDTaKPcfifboutolKqeVfFo51K5vqv3y/MxUAn96NWrvedn8MQPQMNDomvjEiz7k938jNLIc6QJfglHdLEsKJJtD02OqmFrU1TxwqEx5Zu6tx/vfpm7Kb2j3n0vGdlht7l7VfyeWl3joXWTsv6bZxRBw7K1ps96GveipjRUMb5/x+b550VZMFIGsHgASFQRgbnIne3Mec66k65wZ3CeEk4VPCIidvnLtrq3S8lSzK8zJiIWSMekbmFkgWwtwkEfQZqZTbl2tKzz1Wj7arxJb0di9rp3OE8NeFnoMJzzTn+KoxOPt9LEfNi7TejHO5cd+TrN1N5SM337c4jHORzF7yPZ03WbtWhYrxOteM7fR5FZxFVns9+azTuJxrdEkIB3OR4DmXUzj6hchzziHWIWsXGecSz3miTvKss7k/L7LQ85lgjXP6Hri0314O2t/KTFEOM86jEXcJ4aSJVmgPx60QGOeOY1n5eEo6IZys59whyaJk7UIZK8dv4KiVcYXgOXdK5Dl6Ek1s+O9yPo5fzYdWrcQTLarKtJHYk1BIvYrbpwMAepAtElm7h9VVXzznNrP8TS+dSHhbhb68X/J9N8a51RSY/Ez6Wy7uQf2/X8EfuneEyak7zzlt3Mh5zvmETN5izqWy9gSF5OHiRp6mUChcveeEAHkXHQnM7IN6ock17imPkqAaLTZBos9P3g0Wmz1HwMU9Ttkl4GKUOj3n9omCxlGmTZoQju8fCpsZb3ZpiCS1/Tf9vOsUnvxsJ4b8cADnsoqERGEuSZaoxTajlYAL9xDs6EvFcGMsS0pcJevsdqnFRrA3wz7e+Gyc8/1WqRbLymk4m32C+kU7t8lwBGwWV+WLjOdc1jjnPBnn7o0tDkq79JeayMU5sqLTnnOnrN2/hHCAU9p+3Y9a50UynnOdQqb/0kjLl1HGqJAQzrG/RKWJ+lqu+4bIydp1DuWMySok2SQWI/JyqecVP64eWQHcOg2dUd4Qde6bjjl37znPz3HsR5/k9NxyVnBUvV5R0iZ6fHcjPSaSEl5+ydr3fWPv197yCcjIMrefkj8nKnCu4zLtPVV4Ns4JIchw5I6oXTHOuXDlaIMgqYZkvPJXvaWSutbcQJ9fqwk4uQ6Yfy+w/Fnxdj4nhBM/1xJQLCRe5CX7Bhn1A5+ILEajgp43zt0lhJMuxvPy7LhURxt8NM4lz30VOGhVSqcCh9pPHu05J5zToAuRrP3qrVzv35G77pf3u5e1i4xO3jh3jhk5Wdcc/c95f+gMTm97jM2R9wgq5zPEn2ztwXrOqd9lccjaRdnjXWTtVMiMDLFaNRQKhWvGdtF5chzz4EK7A2He3cgqNMnEnBeivM4KDazBe86p36F2yNpdEsJJPOcJUuOc7gO+9ElRlRjxtbuaZ8ALi/bhn9NZ8Am6X64ZYy8HHWiixCiFGefRiC+l1ABx7FYoPOcAQIjTc04N5nwmdrHn3H6DO3JUQA2b83NB1m5/YKuUCigc/9cqnQOzTVQDlMPy/XYpZJcmlZEsNYwA4Pgf9iQUn7cRv09J6sTGuYzX22PMudRzLiOtBVw9395W7RSSW82TrD0gz7mk3Q6ZLgDUcsTm3yqSL/91Q2Sce0gI58VzLk0IlyRd+fXgsUyTJoWTWZUtkpG1FxY4jfPLufZrEKdVoUqyfX9Gi01+omwxiicr/D3nWHRROiZR5eIkE07+gcRZoSAEsQ7vpY6YkH4xFzq1EiM61MFX/e8W2iz+Xc7rZOIAKwnxEEwIcPOU88Hq6EuC51yKZJFJDZtQVstiI4jRqNCAVqh4gu+3ChUU7srncFZ7xYNrR+xlzTxh8+Y5d3igZY1zqv/46TkvF6cTbdNVuQeLNR/CludUQQSaEE7Ubn8850ZX45zOCeKa6drmOm5Qxqi0zrnWRn2W7yHbtpzn3HH/FxlMQpJSk9GAGCrfhNXi+K3ScdAddMy5pM454CynpuJltAmVKc+5DYR6hupo41zk5aN+C9XPFJLQD5895zYL8Ofr9iRP1w7bjdCVQ+QzGdPPPasJHEfw1yn5iakGViRd3ATMuwe47Ijn9pBkTMr1fBMMFhtUSgWqp8S6yNpVSoUwtot+q7/lPH02zqlEtlYTcOgH+e18zaMjNc4VBuEe43+XS7I7iGPOPZVcAyAOB7JZnc8nrcMDKLeQQIhXz7kKnJCU0b5v537yi6g5BWfz7FTwBckYMf2Pf3HLjZpOwN3cw63nnL6n+Moozt9x+fJFl33GmpyLUrxxblboRPezfNv89Jy
LVJruDH7aOLfvXwv3Rqg3WTuvTOLjzk/xcedynnNJSeQkpWROacjBuNPP4jfthOATwlH3lgoc4h0x50o3MedqcIjTSBaiJY41r9ClPSXXbs2Ra9h0/AYW7MiAT8gtzBz/IzJl3cIEM86jETmjAfAcO5ktiYkKFHOhs0yQKCGcIw7Hk+dcQWDms+Q6HlacQ9auViqEh7cGVkE1arM6Bxmr1YrfDtknwL3vqS7fPneZSkXGOTVwEZus5Fv0r+gzOVl7CDznkAxsnrK1Swc6Oe+/y/ck7aa8KfXK28+7xUZcSnCYrZyQLAhwE3PuY51zcSk1Gc+5hweni+dcplSGXLb24kLnIgkvaa9eLlbw8hvMNiHXgQhLsUShwjnfB6B2SILdec75dikcnt37a8Xif61qYMuYBzG2c0PBO1oknexRv8toU4Tec35iNTD/HuCnAfbXHO85dxMDLiMlpRP0NauWJGQH9wp/DpVKKLUejHO+lJsh2/N9w1l8ijn36jmXKo48GudK+4IMtc3T3Hq0VR2F/vwW4T2zw4uu8TPmHKA858HK2umYc5cxg7r3eAkh7Tl3LKzyXlMFJWU3FEgWM2hEWd8JwHFCzLm6+IbQDrOpWBTWklfo+L+nmsc0Mp5zenEwOVaLeJ0aqXwiu/hKzjGPs4FQChW3snbaUKInopTMFvCj9jUdl2zMAzZOtFf1+K6T67aS8e3k9QJkF8ov8KrAocPBV+yVF5b2tb/pJWaV5lyW/XdWT4mxnwuZ2vNxdN4Auf36YhAqA/GcG90v2PhqhEpl7SgW7jG+f3srpeY1W7vIODc5nwNCQjjHazXVv61GGc+5WM2ihs0Zbw6Iti8sliSEE2LOTYEZIZIxgrOasOLAJc/fcTc+i4xzar+i8on27xJq3LEUZLkY53EWp9xZZ7X3VTM0ojAVn9vmSarviwybet/qMM51xCz7OeA9W3uM49ryGdtP3+A95zIx0xIFTDm1Yxs+f0tOJhItN9BYeR5FRV6cQd6g+pkaNsTKxpw7r5MSnOCEE3CnbMn4C0hf6vo+LWuXPNf5Z7jPajJ3eRfo6g+lHGacRyPe6pzLJV7JcZ94yudjAUDxLaesHc5BUSkja+eTCsWoqFhMs9jw5Y1zjUoptFvBWYWVaitlnOcUGlFgsiIpRoPWd5SXb6+7uDZq1V4kQwJcveeC91NmcJG+5844d6kZ6mVQcfGcu4s5N4XGc07FzFaLsQre7SxJrXNpHLp8QjhJXJwEft/Ns9cD/8wGYJcA+1yvFDIZ2+nB1/HAlfOcG4sdkwGbRUgGVy0lVpjIGyycfJ+xGuVl7Y7Jk8bhSZSWUnPxEji+17FuIj58simqOjz2ojhcN7/LZOXs8r1Q8vcs+7+n1ora61bWLlVw2CxCTDQA/F9NH5PBASLPufr/2XvvcEvPqmz8futup86ZPpMp6ZU0SAIBQoAQEKQIUg2IUgU18FEEVAQVRPywgEgTQSlSRUApAQFBAiEhnfRkSqafmTlln93e9vvjaet53ud59z6T4C/JN+u65ppz9tn73W95ylrrvte9qoJzqiJuCtQZ51Ia+5aa89l2X5XTyM8eYc15UUbOW5wBkg/UdwvkvHZfkPNl0NqlIFxdrf10fS6tRzKI9oDGCuM1hRBKJJ7co5Fp7QBQZJLWXu8qRDTt64JGc20+zkatS9aQc3ZtNSM5ODMWq+B8fK1Ga9dqzgP2ub4pCEeZZ8TprhnB+ci09gOkZ/fSAWC+IvDRas572H5wCTOw1/pHtAuKqH2mnx/SclFS2le2+Pt1WjtAa7MdaNhIyPmoNecEOc8G7uD8CGntW7y9WM3XMNW/vDrp3IgDrMCC9Gk0KwojOB+o74zFPRXBOlln+4uW4NxEzjOZGGDHVtfcpgFYUZBnUAx95lYz1oial+CzV+1Abq6d1Fy+h4vWTvUq+PnmfZWQyNrl4HxCC865KLEXa/N55HOrSuhoc8YxtsjrGWf7RFXB+dA+5+zZnmgqtlOfUhzTmAeTPr++8TXsf6J03+s6/MdRjQbnXoaxmqg5J2PBoLU3TFfF9Vw++avAV16lifkCMJBz/dkJP3TkhLUrCXPdZ0b7/IPAjgbnD0SjWVEbrX18XfkzNvr2KGYuZp2DkpoeUlp7XhaEEw4epaknItjm78/5AhsGnrbYik2T1pwfandRwwAvnLwRfuJAll3UOZK1LwWFJvK8bLX2Ee6ty3nZfS3wg78sbwaVyLkZ+I9Sc24eX22Iq8KupCrPGiijSQm20dqHtVITNee/O/9eJny3/1YM0lwPHoDKoKgaORe09nLNedJts3v85+tw3A3/FwCwcbpBgvPMXpPpQs75c6lxlflSaQW9z6YaNjEqRKQ5P2RTGiSJLgh3f1CyzEC0GEZrN+qScx05P/uYqeHfefXHgTuuIDXnAcJ6heCLEA0DdBTNtDx1B+eL+zCz67vwvRx5wUo2Sp8V5rgnNpvylvDkO98JbP+xfK0OduyMrCOSXXREgnBcZX4UR2RuB9A5ZK05D+n8Mp11IQYXNVUAkXSA278F7LpGBvsyMCCOX9qtEIQrjZdMJgzGeiqAKdIexkhZi0TORxUEpGrtFlo7wIT7VntiTyTBeaEj5/WQzauBSWunKB/t6ZvoavUj09oPqFIitPcDY6vI9ZgJ14H28975HtZ6dsZCaK6jxvkOo5TefYA9s2NXjfFOIPz9ZO9tyWQiXRPtFFenUYZSFb14ZOT8yGjtjw1ukIk7WXNueYZZ0kWMBI04wHG3fQxX116N5y9ZKPZLs0Zd9cCCnFvOtb9YRvhKZUS57MUOQPMTUq3kLzPQ1tFZN+rY+j2oIcG2gx38+K6Djg9gROScXHvfEpyTpKDfK7OlJnPFbIr53OthlOB8mbR2es9cyQ2KnCd9bPQOICro5+zBubOVGn+2x3PF9u1Csd2KnOvsypYAmYSuAWkJmnT0NWrZZqk5n2hEeqtaQxCuZmKCVuYpeVZUuR/Qa84dwflBW6LdZjbkvD4JnHjp8M8+SOxocP5ANBetvVMRnNuyiIOl5QnTAEDnkArOPTX5/IJNOluf89i3IOciOOfIYOgr5ByZCs4prX2u3cNbw0/jzXPv1JW1qVHqHL024hg0YCzapoiRuKe2DdVGax+pz7jjPd99J/C9Pwf2XK+/7kLO//21wOcuG+3Y5nlqx1dOwIqgyxSoARxc0t8n2jmJfaGK1u4MzhuRHogPltBPc8SmU3n7t4Av/nY5WIINOS9vXm2LWnsxWEJ23b8CeYJH7f4kjvV2G7T21O68OmvOmSMrKLmlPue5KzjX2RU0iOpovdbV/U+SgU5rrxSzGTFwN/vISlq7GZzzB26htdNEyVnDxOAO3MZUfD/9HA05j6uCc4ooUhTNtCwpJ9bE7//5BgSfeyGe1LgDgEXs0Kw5p10vKlCnS4JrsPXerwA//jv5WqNgzzYnDoHU5QiPQBDORM63X8mE0kzdkKVZ4G/OAP5yq7WVWkgRVXOMC4c4bioq+eFtTGn5c5fJ/sqSJk8cv6Jb4fhZkXM21yYGKjj3Ul3QaFFQdE
dJNAJDae0AMDNWwxrw8x5bq9Ngyf2o8+TxIDNKnPoLalxQQTj+HnGvnSJhpu0nyHl7P3MWhX3wkcBnX6jmsSEIt2+xj7WevdY/9CyOv4YCVgexGnKu1d0SWnssarMdCZ9Rnhvdm13JZ8CoOb8fkHNxblsfCwA4x7sD62vsNVeLtCxN8a/+H+N/ar+Lydu/jGN+/h74XoFHpz9Gycw2tVlfnZtIfInnQdeZ/sJQan6AjN37A7cB33uXVoJTYsbY6pSXY0bSf32LrV2fuWo7MLcT2GMpGVxuzTlFzkVwN1DrQNg7VK45L9Tf45TtqX3Ew2vOlysINwqtnVzLY/Z/Gj+q/T5qRI+jrNYOrMespMCbY1kkP4Vie1EAd+5v25+l0XWhXvD3jHHknKzR6f2InAfIMRaHmKiH8CqQ8/ooyDl9/qYie4cE54k9OM8LDNdBAOzP/rJ/OxqcH7VfstHFyIqc6/29AZQ3z93XAe/aAHzn7dXfZS5SnYOSuk6Rc08E54K+s+9mnHnoGwAKrR96kghUmh03k7R2VXOOPJWbZkEC5LmlLl4SXsF+uelL9vOlyDm9ZuIYTHruFkJmzXDJbPXp9wU5dwXhLkG4tAts/9Fox6ZWoQo77Svk3Fz4BHK+cZo58LYWHbLlTAWtvQlyj8OYB+fGvbz5y8BNX2QJC2p3fx+n3/lR43rIZ/km0RmUae0N9NGbVxnay8Mv4RgTObdtxEnXgZyzZz3mdTFRD8vIqOnc0+MRq0c+fB63aQ4veZaDwQDpqMH5qDRGsoGzzwlau1Fz3uB0ddOJJsj5xumGDCSdRsexyJT7AcLYHZynczQ4r6K1WxJj4ndOkdtSY99fahOoOYxzuqOfJcDem6qROXJdIiGUJ+o7kvsBOd+30Edx6B7gn54CfOm3gQ89Vh9He66TP7bNYBpAWFTQ2sWaFbdUADG/E0ABLM1iaZCghoE6HknqeP2q4Nxsj5hKWvt0qmiLYTHAOKk5lxTdURE/Mt77nNZual6sHKPI+Ro9OCdBUi1gDmeSpKAq0UChxr+WDGRjZc1EDWd5d+KC295T3ftd2IFb1c9L+/V7NXsbcNt/qABeo7UPsG+hhzVHipyPSGs/1gzOyVomED5NI2O5au303poMC/kWkyLer0DOl0drL2ZOwJ3FeoRejg2HmQgf7XNOGUzJzV/F6f42rPIW0Lz+4+r1wrLHze3Uf08JrV0kvvKEsxJocL449BoiZFgVdpla/Q/eowUwWvKNCsKJcxjFioLVAC8dLK0Rv3IKK3f59s37kH7qOcCHHwNs+x/jWpdLa6dsFHaOHum4UU/mKsGGcMCCu15xpMh5xX3R5szwmvOxzMIgosefvxdvvuXX8OP67+E513OlcKPkVATnnufhxNWM2l4Ozu209ijn900wcMganfUd82tUs9acG2rtVIfDy1Azp4ZtbFPgxXxuVcj5ojqfErX9F/8O/PgD+mu2Z0/1Hh4CdjQ4fyCaDdFL+4qqOLG+/Blzwfv2HwIogP/52+rvsgTnKd/EqFPgSVo73+D+4VF4xcG/xGP9GxCR4Dw1ae1CEC7wVACdJ4TWrr5/vjNKdp4sfjRoJgvbjGc4Uhpynth/FlZCzi2iVDYzWz7J73BsLFXIwrBzsn6/iZyr4HzS72BmjCHnBwyEUQTnrDVRgSf3v1lC+Yci5/UILRqcw2PBuee4diqcBAD//Aysvvq9+msWQThbK7Wm10cxe6f8/Rz/DgM5z+3Oa9odipyXepwD+nyhjoiRwPE8Twr9aME5ua7URM6rHK4hok/kjfqv/NrTIkBOka3GFPvfdKKzBI85YRXO3jSFV1503PK+TyC/XgCEDgE6APN7CEK8uMcdJOek5jwS1GydHj1VZxmQUnCeGQkR+tzuvAL40IXAV3/XeY62+VzQxApPUh5Jn3OR8OgmGbq7boS8h4NF4JavA194KbCwW7uGXo/dh3ESnAc0OM+N8SGea9RSCAZ3nIpsgDdmH8P1tZdjYmkb/9ucOi5P7Fk7CVTQ2mcyFZzXkGJFqNaETpffz1GDCv58s7yQbC0zOJ9p1bBKCsLRmvNcm9sNzuxKUst6319gAYyZvAHrIPGV2h/j3L2fV23/dv7Mru+SJcBBtQ6hvd+egL37++x/6lymPexb6DmRc3PNY983miBckuXYwYUyt65yB+eC/eCsOV/uHuRKPg+WdF8lrQrOB2wOfO/djF3iMn5uvTzAD7MzAACTs9cCgFbP3SOtEIOf/r382SNCu9OFJSCjiB8ATRBOJL4AzsqjgoOLQ5NRgZfh+XMftf5NY54V2ZEh57/4d1YD/KFHl/bBLdMxzt40hTTPEc7yxNI33sz+HywBH308cMUf24/r0jwwkfM8R5ir825ki5WJnnCwHFq7JcivWl9caL/2niH3lX5u1zWYSNjYWL90M0ODjRI62qZQUNtv37dof5baPCgQiftWn2L/E5+xcM2vUY3ci3pQIAp8iyCcQWsvIee24JysYebzcQTnRVFoZWklLZavXQ58+216ksz2nCp8jgejHQ3OH4imCVXxhU9kpDwfaK0sfyYb6A6aK3Nt+xw1Qmunzp+oOTdVKbd4exGRmvO0hJyzGR35vkI2skQGerTmfKlLFm2TEiOscFwj2QRXmcI6iQs5HyU4dwjCmeaszXJsAi5E3XqMETbiEq1dHX8cS5hxIeec1n7cqjGc7t2DPws+ivxrl2vvGVZzPl4P0fLIPc4T9JOsjJzbztVU0hamCcJxtfZ+qtdEAWiih9q8CvZqSLSa816S2TfipKu/XhTsH98Ax9DFdMOib+CitQ/K7IpSHWdRaNeVJomu4V9Jaz8CASBAXmOKAHlANi+BnFuSUSvHavi337kQl12wefjx6dw4xNug+H6lKvdMRmrR2vsq9BdSdXxxvrLXL3sOk/ySaOYdgBFsDeyOxPVcPMaWvLHMeRqciyRlfARq7Y04kCKKS3sNKvsP/4oxTH7xVe2cB1221rW04Jxck0utPW6qdk98D/FQ4MXBFah7CSZ/9Kfa3wAg5orJCUgidNOjxBfp35Nnkv69Bsr5qnkJZkJ1vzo9oTQ9oggev54eoZSb68+qeoYJQZ0fX6M58x7ZJ+pcsDRLyBgRKItNtIvPGSqMiJ0/Ze3//vGJDOU0bWG3vp6095d6WgNQwblJa1/oYw3syLkmCCeMro8VtPadhzrIctYScc143UlrF0GEU619JPYWeY/L/zDvdVVwnqfAle8HfvAXwD892f29/F4sZQH2FgwNDnrsXtIxI6nth+5BtPtq+bq3pGquV2ABSfuQnng1y7Bosi+iwbmhF2MbW4aFyHFi9zrH34zksWVPHGq3fI39v7i7vEZkA7zwvE1aVwXsu5HR22/8IlO+pvog1Oje5xSEKzOf/DxBUdF9RiDn3SIaITi3MR/L/qxiq9CEf0VCuMo09oIxJxZ3G9T0QpaLAEqx/fZ9bUfNuXpvCz0pwiwT6XRPGrStbXFHNnId4xyDYK3U7B0aAuSathT7u+Ue0rli3h8qCEeuZb6bKNAPltaogslFx5wVOR/C8
nuQ2dHg/IFoNlq7GIxBzR240sVnVIE4I0AtevNyotDgnNHaOYpBPnO4GDeQc1F7pdPaQ43WrhRKKa09osFc06HWrqllk8WKLPg1z1hgbcqY5s+u1/JRae0u+tf9gZyPQO8zF3iSWW3lS1jFkXOzNlcshFtXtjDFywEKgxotkfPYvlzUowCTgb7xDTJLzbl5rgBzeK3v0TevLC/QTTJdTRTAJm8/olRda+ylGK9HOq3dGZwbau1JFyL4CLwCa5uWzU+jtRNHxDJGtPZEnUPA+07V0NosGTg3w5KNQmun49yPNPGnHD7ygGxeIhtf+p6KsfaddwDfepv+Gh33Ijj3gtE3ysU97kRVnqh1z3RQ+P2YjB3IeUnAqYLCbi1vKb9WkNfuiyAcoJTmk4NGX9c5Xt/a1xGmtM/GV4vAFz55Vrk5PkSyKGqq/cKSCAvmtrFxQiiT9ZzNpwbtejG5wX4hRSYd0PUG8jsNFeD0ez1GKx41qODPj9Z714xEyPqQzb++VwNqE6SVWqrtXTXuVFJ9EzS5gn1voeToCX2VtZMkmdWbZygk4EDsLI6oDd3a9iOdFg1IWvu0Z58HgRmcf/aFwM8+pn6vWBsEpX3LyhZ839PfS9Yy8Qw7fYvvAYxWjkDvQZXgqfkZQwhLe6+451XGz62d+pgDC4BEkOCTHu5SFG6/oSJN1vHQyxG+7wTgHx6lrt8sFdLU2okvliX6PbMJwhkWIENc2O8tbWWL/AhrzinT0BwneYKnPWw9NtaN8Xzg1uFBKl1LNEE4o5WakeSMkKLTcQNHQZ8G50Nqzq1q7cZ5f+wJwAcvYAH6KGrtw0QItXI74/vnd2kBNqOLq/svFNvv3L84VK1dWwskcq58jFrek10slmV3XAF85GJN32SMC2aW1dqXj5x35t3UdY2BQo5t7t37Fug4T9X4srApNTsanB+1X7ppdNtM/98P3fQNOhmOEDmngZlGmwTLng3SXKOnLKKJkAQYScInrFFzHlBBuP4CHj64CjUMtJY3GzwyeSldzHW+CblG20It0BFKUTURtdLxbTXnowjCOZwX1yawHFrSSMi5oNlx54Q4R828LZFzcyEUFKJ1k3WMh7y21tigZM25AzkHgNWxXi7QTyqC83SE4NzILAu6pUnxPN3fpv1e499Zp31rbcG5SRMs8lKAvbZueXaaI+IWhAMUytkZpMA1n2CZ9Vml6JymiVHjdR9p7bSWM6xpn0nh65uXCHZNczknSRf40fuAKz/AnBD6ujCx4fvB6P2sF/dWOPIDNXdN5Jw/hwme9T9QRWt3Ief076MYFYRLK4LzdDB0fgtU1pszKNJiDCZL2vgSwfk4aaXmk/U5TY3nJu5pXKa1U/MO3c2eIW2rlbN19SPpU5HXp4EnvsPevhMA8hRh4GMiKrDSYCy1UvV9XpGyZzSyIJyOnMehz4JLYms89n2zmGYBnnDmiwwhaX9UC9gxtOBctJeziHaJcq61FDnvzulq7HmmByjmGGrvLyWdOuEke677btTe3+93sdhLtdZz1CJk6IGweG77D1aaIayCVaPVmwNGclqdg2T5aLT25SLn5B645rQxD3u9DvqZAwHME/f+QI2f22LiY77g10n8GAEEyGszg3PDvDxl+gwiUWYG5xpyTta5PDWQ8+GCcCFyvVUXsaiEnNNSiBHnEUVyzT0kS9Do7cdzTjQYYml/eM96V22x2Uot0f3QECkW227WoM/ZJl2t5twyvjMi+vgrfwWc+gz+OrmX3Tm1J935XYN54BKEG4ac0+DceAYLuxhrjFuEVCurOIEH59sPdZAMaHAugBW1vk2DtJ4U6ze5l02vhwVLZ52h9unnALt/rr30quyzwGdfiPHYc/ojAXKNIcv+npQEmq++lZQsmjoe2thQ13/AYL1pyHnmGPO28R8dDc6P2i/btFZqfLKIBcqvQKVGyVybZgbnC3vwqejP8dvBf+q0SbBMIAvOFT3FQ4HQU4tnJpxEfr6iVVREa86v/ABevfut+PPo45oTvdkjwYWFJswOmNjfY5usAh1JLAshsIya8/sZOS+KZdLal0Ep5EmNghy/ni0qQbiSWjv73OrxOiZiNu6EWjG2/Qj46u/CT9ixXLR2AJihwXmWcOTclZ0m1zNicC6o4VK52GCPHGodz17mjrU4144LOQf0BFaRl+bM6nhI8karObcE55QqahkfWboM5HwUWrvWoqivOTUZjDpwEeyWTspxDtra4kiKiR7MQ2rOS+fsmgtZqjZ0waQxgvPxiI3Z2ZJau1HnX8U+GbEO2iPjVgnCWVC/TzwVeM9md8kGFHIeL+60v2GwpKFQoh0RpbV75JoyM6mSEORc0tot55P1Swr/TY9d523+cfDedDfw6Mvt7QgBOca21ubhewXyoIaMF2v4ZMxGSHHv4e5oaxkgn59Ah2xrz0zO6Mj7iyn2AnHmqdJ1jXs5qUgce75KTvXmLcE5YwOM16n4aBeYvUP9/vFLgb/cqvQzxDFaXLwpT2RXgu3RsXj94FXYHW1hf5u9Qxufbd4DftKz7zMBMhRFRVcAW/BSFEDSxd0iOF/Fg1aN1q5+ViU49yE4d60R1Ix5+MNb78W3bt5vf286qKTsm8dcSDzMQwTnKnikonAAdFX9KhPP20xq0WRf2IDsfJENjN7wg+o1HezZhrkrODeehaW96FAjwflSz/ievTcC7zsZL7v9lfrrac+djBOmBee05pwi5+WSwBAZFtvDfdNOHqKoorXTe3HWC1mALr5T+M87f6reUxiCekdcc07uu7nnL+jIuRmcrxyL0YgCFAXQWSL3QLb2VceWyHnUVJ2IyHxsoYeF7hEE5xZ7WHYTcNt/IL7nuwg8O3IeItcYsgDYvaACzVmCrO2oOTfnEAG8SoDRwggBuW0fCUb0OR4kdjQ4fyCaTUVaLCaVwTmZDBS5qapNMRb5YMeP8OjgZvxR9CkNmQHYwppkuda/MESmIeeZcID4IpbyusXQ9/Q+qACeE/y3tkjqwbnLYaeLIw3OLaiDQEfo+zREbQRa+y+j5jxLRmc2AOz7P3IxE4pymTjvGkfOScATJwtSEI4uhFleyN9XT9QwJtZZcc6feCrw83/G5dm/AFBOjs2mQz3p0U8yxGZ5gfziEZBz7ZklshayIYZQbVx7+654KwBOAc1zVXPuQs4BPRgv8tIzWRnbxgc594o+54CiIHcGmXXTz0zkvMIB/vZNe3B4aYjjQIPzPNG+MzOR8+XS2jUaLi0nscwNPxiqnCrFxrqHgM5B+5vyRKFXq07i36cH53xYY9asUzMFvqqc/FGRcwut3Vpzfu9V7Pvu+LbzUAw5LzDW3c1eWGGI7w105DwTXQRos1n6fEvIOW2lxgOWThk5B1AWaOQ2NdaCJ5Ag3+Eq8Pu6NZpjX1FfjUFRRt0iZNg11z1i5NzscQ4AkxlzBHdnk6w3LtE0oYGN6CYixUf9kNHgAZYAMc4pQoaJRlRe7w6R+3Tvz9j/N35Bfif7sjHVQo3Pk/dOvBVfzh+LbVjHXjeCwzZvMzftO5BzL9VLDEyzje0vvQz4i82Y37sN
AG+jZr6XqrVLdNmh1r5c3RNnqYp+rn6WoD1wsIIOkPskej1bv5c9v7mBj7mC738k4dSkLCpA3v9+ofsjqekOz97O/rcF5+Jag4ihm+J1asYabLMImSaYRk1Lbh9pn3MSLG4/YGjxbPuh/TNpf3gy2EZrT3olpDPr6/tihAxLFbR2YX3Esg2vdXyTe3G47+EFH7+G/1Yo/3k7UZ6f26Ffk1n2JGxZtHbjGczv0tDvGJlM0ANMJFaMxUxDzgXbVJ3HNPj8iVtqfBFrood5V3C+46fAV17DFPqXY93DiBzBeeQXmo4HO99UR87TLsLBHPmd3NeeMfZIrHKA793CZ1tYmAOu/TRj6LpKOcyEuh+W4osHux0Nzh+IZqO1y+C8itbOJ5OpPlsVXFbQeHzjbyEypHmBvK2Qcx+5VhOXyYWG09pFn/PAt2djySKpZe1cwat2XdXIeV7nDphLSIUstJ1Biv+6dR++fwtp8yTeX3X/JHW+xzLtn3gacPcPyEk4sr6jCiMBjJq1++fALV91v0egbDzoov0q/f4CVrbYmFnspdLhPbjUR16w/WSmFWM8Eokg/bmf590MoBo5XxHpG1w/ze1CRoB6Hlmq0bw1M56tQHQa4hSM4PyeYh35/oFSa69EzquD85nI4gAdAa19qZ9aE0F5ZgTn//5a4Au/aT3Vt3zpOlz8f7+Pf7lyG9LM4cxSWrtxThl8eJT25UTOXaUI5F5owiyWcez5Qylme7FC/WKetzyXRClji+BcJAa4AzbGl5TqmvMhyPmIwbkn3rc0i5z3ta1Ua7epenNbNV7DNBZRy/k4X3Oa/oZBW0OhwozdZxmc93TKbJqZgnBUrV2In1mUqAFg50+sL6+YGFO/uJBz7rBtDJizvt9biT5swXmKXYeXE5wL5NwtRtnss31ofzGFw52B3F9yYy7WfIPW7odq/eiVqccRUkzUIw31YgewjBOBHMlgLVZ9ibnN5WztvXXAA0wjOBc1uGOwI+dNDLlntpKXm74IZH2ce+DfsAILuORnLwOu/ZRRR16mtXfuS5/zkZBz/R7WMEAOgxUgxprWNaQCYOD7yeE+CK39sAQlhNgdS5ImEhG/rjheO8xeT39uMjg3mCXaehJEagyYAUNGg3M786Hhue9rjQbnuRGcH95e6qpiNeJz7d7nYCiYZqkVL5lWKsjHjBmAZQMsLLLXlgo2ByKk6Irg3BJ0CushRk40JLC4Vy8rEffCD/GTbfO4bjdN5vO/0bZwh7cZ58bv7TWfBN69gdViA8uktbM1uS+SkQu7tDkTIykl+MTvWWIJOjXknPsmUdNaYtD0+m5a+8efBFz3KeAbb6y+FtMGSwgpdZ0kWiKvKK8zeaLrRSRdxFpwTq6xIjifbffRQhfn8un37MMfB/79dxgFX2Pm7Qdu/govwzLmzUOsjRpwNDh/YJpNEE4Lzocg5+ZEqEJpKxxTzwhqVntzAApkpDdxYATnZis1gZxrtHZqrgxtNtA3O0Hvd9Lay0FCGk+pY8nj6DV3P7j9AC77x5/irHdegd/6xNW45V4j25indlRemEgAJD3Wq3jbD4F/frr9++SJDUZ3UgEVDOWpo/6K1POMry3/vTePiUYoKbiHOAIr6EMzrRrCwJfCIOZ3iP67VTXnE0QQrsgGQ2jt/HkMFt2Bs1GKIOoFXcj5DUtT5P394YJwphVZaZ5MB0MEZ0YMztv91HoORZrotPb9NwM3/5v19HzkmOsk+KN/vxlPe/+P8JO7LVnxUnCurieDD5/WR7qCc9Gv18YgETbkukdBzvcXU8gFVZe2WKHWPayo2CtN5JydX4uP2YNLA62PcYkhU1lzPho90MsGQPsA8N7j8M57GYsl8lF2/IQJ1N9iaybqOMbjSc6xtYoOLcxAzuveAEDBxtTcTuCvTtSSS7kZnGvI+RDHZcdPrS+vnCTBue+Y+3xcCzG4u/tT1uA89DLsmuuMzlLgxw33Xocf1X4PTy5+VHqLzxlc+4tpJnTJA5HMqPePeSs1mTj2I7VuW5Dz0Msw2WDBebdwBxAAFJWyIjg/nLBj3NjjpRlGzXO324GHHI3CHpxP2dYhahWCcPFgDs8PvofxPVcC//4aN61dtn28n5DzEQXhpmvAWMO4x7bxWvX9mQrOpSAcSao3Ja09ZSyRPEESNHFXvk47zL7ADM4NWrsordGQc1Ibbfogeare5/DZ9BakumnCtmbN+b//DvDhxzpZL9rnuB2YHS04zwe90QWFATWOqBgcAKR9zM+z19oeS5qEXo4e7zxh7uHUegUNzjPVDk4g9uJeBzXMdROts8Qv7p1l/uKe69QBhVipec5f+z32jP7zDez3YXtBXg7O7ym4zzW/S/Nn635WErFcE3bw7vCjGL+XsBbEd+aW4Dxu6tRxbk30sNAd4tfs/Fn1301Lugg15FzNudCztKTNEn2NSLqopWoMFFpwPqd/lgbniz18PX4rPjT3SsRI8LiMJ1V2X6uP+f/+S+ALLwF+/i/lRNhDrI0acDQ4f2CarZWaCE59Sz2nWPjFQG4bi3BSFZzzBcHmrBsb4hW1N+Evw49oyHnNB3wqPGUIwqUCOfftyLlX5TR/9x2spccVfwz81fGshk+jtZPrMkTbZosJDCLugDlqtfIsweX/ei1+eMcsBmmODVMNnLFOr2UeipyLDSbtle874EbOR6WliXMQZgvqJe3OKzmGAIDePDzPw0xLtFNjxxN0IlH/2uKCcJ5RztDy+vC8sloytUmf0OXTBEWBiuCcv+7SFQCMNnmJdBprjuD8+vaU9n6Roa6ktVOzIOcTvuVea63UaM15eY61aK2jxbksIecVFiDHuZuncWF9G15/8E/wBx/9N/yfz1+vB6SlpJy6vyw4HwU5HwCffzHw16fr9YN03Gmv25DzABhbVX6d2P5iGgPhULlqs4WYT2NaBa9plzODRLKGB155gTlK81tOzfmIAaOfD4DtLEhcne0FUODcm/4M+NszGSoJ6A7M3HZnSdHq8RpO8nm9+fTmsqNq1Jy/KfwcflL7XUTtPUwMzEgYlmvOBXLeHK5ie/AO68urpkZAzvn1rvVYsui27oRCkojFy0bO2XG3XPUn2OjN4g86f1V+Dy/j2FdMsRaRXO046+lzMeYOZyGR84DQ2hdLY4PR2kM0ohCLMPYD0ySlmSCpNEHq+ZhL2P2QweC8rjPQ63Yxhl6pE4WwCW9IcF5kzrV0ymsjiMnzF7oQ9JwBNGuGaJo4rrAqpte1nwI+9Rx9PXQy3/RxunHcw8M2GmuRNTivmr9sTB3seeighoKL0Ip9sUnX4cMsSFtsbUYX+rw4GOrBeonWPsafa9ojYAlBzs09PUtUEOFIkLXg9i1qJq3d5jPcOyQAI89t/vBoNOf59uJw5JxanjLfrISc9yVy3g/UWpJ2edKxKjhHLFmXyFPGFMgGrGUhoNaRsIa5TiL9TAB40+euweHFJX2smYlS0ycQJZDDgnOLL7a94D7Xwi7QDjDjUQHP6ETwW8ln8YLwe6i3yflI5NxCa49a1uC8VYWcCzOTJcMs7enBORlvoVeUgbT
cAB2SLhokOO/3yJpkjo3d1wJ/fz5w9/cxt7iErf4+jKVzWOEtol2QuULXHcFEW9xjQc4fWmJwwNHg/IFphSV7XYWcyxo3PpBNBG0U5NxW02WhuD43/AGKRRWE1kNoE1QqfYuac48g55aaEK+qRdSVHwC+/nrgf/6W1aX+5B/04KgCOd9erEG/UDWI6prUz4cXl3C4w/o6f+f1F+FHb74Yj946qZ9DNqjOIgsnL+1bF1ErTTjtLw85145n+Zyo2W1M2zOIvXmgKHBsk12HoAALpfbVE+wzAjn3LAj9mqhb2miojRF6nuh1X/OIA2O7hqpxmeg0tTKtfUJ7+x6sQCKf9wCNkJ1rN8mG15EBvMe5EZzb1JPpsejml5ep6xpybqnBLLJy33aXBcjx5NPW4tN4K54UXIMPRX+DL/38XlxJEXTzO/g9zAsPURgatPYp+xdlKRMCbO/Va2zpuOsPCc79AFh/DnZc/AF8PLX3J746PAcDgbC6as4P3sn+n9qk0+TTvlxzgiLFVJMdR6O2l/qc3/eacz9PNEZACz1s2fY59st3/oQfi3zvth8C7z4GuKdc27l6vIYXB7wm/bgnWILztnafT/e3Ya13CPifv7EykJyt1Khau8uEw2ueIw3OK9TaAWB1ztgPu4sZ9FFGm0NnzbljTeHHrezly/e5/ZjG7BKhtRuBqkDOcxpAC+S8t1BaU0NCa/eq6NQAEBr1xkEMjJMgLx5Dlyv77yxWo7Bcb7/fxbiD0g7AqeIu7fp/ZdTcm77EFKrJGjWNNqbGiLO7/Ur1Mxn3Y7KzhAM5rxJN/PfX6OrxwMjI+USUY8uMEbjaWDdVc5Sf22wXADzkQk+DB9UNSmvna3wvGEcH+l65EBsJxc4smxtijRvjPhLdJ4JIzUdzbNOac0dwXvVs9eC8sK+1lnWtn5JnmFQESIYJ9PnwQnt5yPlt/wn89WnA9Z/VX08HaC+yPWkQqrUk74ngXN/DqWm09myg5qg4L/F7WMdcdwDAQ+LxdrHzbbzx89foBzRBoCzRExBTm+zvM43u/yZy3j2kjXtZJkhsfWEp4aoShIubzprzoYJwyxEdBoCkg8DRSo0i5wn1qzWwrItWrr6z07GMveZK9dqBW4Gb/w1LbTUu1zULLIIG53ROFfJ7jiLnR+3/H6P9l3MjOLcpIdMAETiy4Ly5AiVHyYXudhRy3ggLLZDLTVo7F35iyHnZqfSGCY/QOsmwpm/SFTXn24q1GAjBF0P5W9jBBXZfnvawdTh+9Ri8a/8FuOoj+vcPlqrbWMm69p69hsraY7uDyhq6KrMF9YIW3JyxO+K9eeA7b8dn5n4Dl/pXqeCc09rXjLPAR6CQfpGVsq6nhHsqT2uMoDsiOJfIudkWT1xDFaPDIQhXcwTn+4tpJB4fX195NVZ/7GxMoo00LxSdtcqKvHQ+VlSDjr+ekZk2HJoxWnNucYyK3KC1V5jnFagRUawtAZuDdx8gTrC5GfN5n8JnNH+a1IvHHBoQCXk+xBmkm6HGGHAg554H74xn445ig/V6bpp4DPoSOXcE58IZndqsO+xpV605WSo7EUhRuDwvz9kqZ3NEWrufJ9p8nvEoUsiPb873wSILqA1be+gqnOFvQ7eI0T7zN8uOqkFr18xyv0q0dqrWPqzFjCPwWbuCJAxcgnB8/Z7KGfthbzFdWXNemHuKK0nEj7tUIwGTuY8tsjVpv0DOZXBuIucZPyStObfQ2vn8CLkgXDMOEJZ0M4w9UiQPqECYEZyLuvk+YnQa60uXmvR7GHcotQPV6CoAYN9NbLzf9k3WJeAjF8k/TXttrGuQubDjx+pnMlYFutx21Zwvh+kFOFsJdnv6tXhpH74xZzKb6nKe6n6R9gF277s5G6NekyPxnErbkoJwqQye+n6jVLIQUv2Icf6cdvEgzwsUrV0LzmMFOpjBczac1t6soLXXQcvxMvvebyTlfnL3QZz+9m/hvd+6lb1A1ucJVAuxLfLkxMJie7T2sabtvk7/Peujs8TWyCxW65snAti6AYQQ05BzmugR917O2RjzHTavC+5fjkc5rrqL+sCWBGCequQvoFrQLovWzs5htpgkCUnl101EBatlJ8nZLLCMA4sg3JRopeaoOW+hh4XeENBB+NY3f2U0inu/rYMF5HxCT+2pcn039kOkXYwXas/qUuE/V9ll+wC6WnAOHTm3rSNpt7xnjdq69UFkR4PzB6JZae1VyDkJEIEyvbqqrZpYGMKaOo78nN1h8EgrtaaBnCtBOPZaIgXhPGswYLZrq7SoqWcuhQNaFCWa57Z8DXoESZVGznVxiX3+6Wfxjfirv1v+Tto+x2Y0MWKrqbdlYpeb0aRm26CFs96cUWrt1LI+Yx8A+NPoE6ydWp5h/4KOnLcCHpwjLynUnuTb0TV0DgH//Eyctvcr6hQlcs7HbqlnfcGR6ipaOw3OVZ/zugjOjQTVABEyQWe8+/vw23vxnIAJ8w2SEZBRC629kVvOz0VrB0pUwI2dW/F7wZfR6/XKvXIBhEW2LFp7PSTUYj6Xdhwi5+hAzjME5eA8atpbj2SJWkfonNKQcyoI56g5B7CiFWt0Q2r1qTWqTrDjqDkXNr2ZBT2ctoyE0ErzFDMt5hjJXue2hFjVWBsROQ/ygbaWzsBS1mCb79NbSi81bmAdEL6YPRb70pad1m4mfwAWDFiDczMpwM8nbh2x47JijMzbIbR2kYjrIUbql5OUEVIsDTKkA8Ppd3YNEMld8r2CZgywdZCvUSw4H6i6eBM5F2rtKdlDNeScP3++TlG1dq2dFQBsfLj+uyGAiiAGJlRwXtTGGHuH28FaOVmVDbqYEMi55X40HbXoJbv16+z/fTfJl6a8Rayuk2vYQcT/SCAias41QbjCjqKNZA5A4Pu/MPaRbFAKMA/0XWONn++eG1jnEqON3QARppoRfFGyI5FzokQvkHOvjh5Fzj0ft04+FgDQrq1Rz/AAD3IbU2r9pOsfVWs30bwRkPNWRclCZc05/Q5i1+2cQ5IV+Pvv3YUf3TGrJSWrEkAAkDZZ4NReai+P1i6sJPo1QK/D10uSfAyS8mulQxWRbMOrBWgSOVdMlTkenOfc//qDS7bqYrQCFaeWJ4bAXFe9btrT/hp4/B/q3wvIfbKHWKt5F7YhOMREzT75NDmX7MF5mda+QiLndlp703Mg5+Y+sOvnrE77H59Yfq9pvTknch5AIeeyHC1lOijCin4b42St6tNEnBgbRtllsbQfXdJWbm2r0JlX8xbdFrMrAHAUOQeAnTt34t57laL1VVddhcsvvxwf+chHKj511JZlWsuHYWrtngp+RLaTBM8AhiDnxKkwM5mOdmbR4bvlz2Vauy5uIZzzKPCttPZWPrznpfripr44DoyFmti2Yi16eVj+O/k5KFIcs6KBs3F7SZRKtnoSmfPYUR+lIefG9dkQPMDudFssKSxOyrDgPLYE58TG0cHh+QXgAw/H825/HQBVc97wybgz2nAc6wrOf/Ae4O7vaS9l3EmpC1q7Dc1Ph4jOUOdgcQ+O2fk11NFHLeCbARGoEghIbgQFwi
mXOghVZgnO/cQyNjVauz0YFvbkK1+A10dfxMUHP2dFzkNkIyPnAXINORfB+faDFcE5v54MPnNQ6boRNewbWtpT64/WY9TBGLAi5+w8m3EAz9PnRBpPAC/8AtaM11XLLRdyLmxqM1OFpZ0RZHCeYCUfv7LXuZOtYrHcrgdgs7BItHusIefCbCUUnrHN9heB274BAPh89jjsW+hZgvOOPYkX1q01+oVJaxfXG7eOWMnWj8j4cArCse+NeJI1KUKEtMaZo1ZNzspJDOTUiZz3FoDZOxAOyD0+QIJzzg5LvQhzGGMJRz4nPCNhFHFaeyEcbz8Eanyv68+XgvMQGSbqIZpxWO44IWpThQknUaO1K3S8iMa0GHdW9GSnh0j6KnAS6Cz9ymLEQMlSAjCNNlbGVH/BLo4q1dqTTOlYUD9EfK63AFz9T8PbNFkAgV6S4ds3Md8x9fnYIiUqwna5XALhW3zkIuDmLwNffrk6BoBBEbG9TIwpnhDVWqnxNbGLuk5rj5qYnzgBl/b/Ap8++zOqvnw/D87rU6qEgV6bHxJau4mck+DcETxIQTgbOuobNee2vd8IonskEfTGL16PlLTsmqgonQCAcIolJLrdzvJo7fLL5/Tfsz76XPwtrI/JvuVRNoIgHGIVnFM/1GQo+RGntUMGsY8/YRrPP5ddSwYfxYSFvZWlenAu9jHb+n3CpYqOrdHa2fPogyQSiG30iE/J18ncxqAYptZuqzmHo+bcBAwM36zSunPwPQswCA7YFCI4F+NdH3vp/B74pGZ9QNvoOZDzYnEfolwdZ3W9QIN2p7CJrSYdC639aM05XvjCF+J732MPfO/evbjkkktw1VVX4W1vexve+c533u8n+P+kaX3O+c8iyDP7nId13WkFyhvjKLT2IC5n7R3UND9Vk64eQA/OBTqRCWdNCMLZ1dprGM0xBsDuha2VmqUWa1uxFt3cgpyTBTBCitdu2QXv408C3n+O9vmOEIvh1ElsPNd+TsLJS/tlWrurznVEoY6epXbTWncmgpvWTOWGBwANb4Dw8J3AobtxavdqBMiwitPa6wFZmI0Ez3o40E1DeRiw1JzHluC83x5Sc07+dsvX8JQ73o4/CD+rgnOC5C1w0SbPuP+CAj96cM7Gu1QRt41/s5ZZO2e7E31870ZrcB54oyPnHgrUCHLuBSMg5yQ4r5eQc0dw7lJiPwLk3PM81GrqmXyneASKN28HTnwS1kzUiCCco/+2MIF8CHp22iOIZYpVgtYukXPL83axh+ixhlhQJNqYXWELzq3dGYw5e+t/AGkXe8INuLHY6gjO2/Ya0bCm5vv0FuwYP4t9rUn5FU7sKLR2l9H12tlKje1Pgv6dIEStRuY7D5SafN4mHDn/QvpYXHnOe/UaRGp7rgM+8HBsPEyQXoFiAgDvGNKrrQTgcVo7O0ffCCzEufliLzVbqYmghyc2Iy/DZD1E4Htl5NxkAZmolyEIlxuJyb1ZeX0Oi0TVnLcc9wPAXH0DcOHldoYWAFupVMMbYEXuEFykyDlfLIsC6A7461rNOR/DX3k18PXLVWDsMkti6QtX70Sny44T1HkS2RKcL+WO6xP3WJyXcNz56wkCrJ1sKLFLKQgnas5TOX87qOu09qiJRhTitmITDhdjwDhH+CRyPq2YRmINCGKWNAwsIADAS4REcO4ShOP3tbmi9Lemb3ScsJUWGIn+fqqe2Z75HvYeVM9+YghyPrZqEz/tPga9ir3ZZSY7LB0g4cepNVpy3I6JMg2TqUmsD8K6ovuwWMdlcB5I5JyKM77y0ccAYP7nnqJ8b5GnehtXE5GnFtbtvexJKzUbcq4JyorztQbnZVq7rta+jD7nZoKEqvlX6UbwzwaOcssAhNYukupGYn4wp5c+pn2q1m5HzrG0X2OPrGoUaHpDgvP0KHJutZtuugnnnXceAODzn/88Tj/9dPz4xz/Gpz/9aXziE5+4v8/v/02rFIQzELAwRsF/v/XeA/iH79+Fq25XzAYAo9Hag8iCnA9HtVlwrs5X0tr5+Sa87tvZ53w5lvT0zKXYJC3o3fZiDbqZiM7swXmIDE8Kfs5+MRxhUywGGww6ozCJnHeXEZwzxyUPq4WaepbaTevmsQzkHAA8nnDwUWAFFiStvU6zpgbVeHXhCM4tdG2BnMvgPDJp7WDZ8Krg3EJDfmrwE9REH04ylhZEb1tjgRbBeTZsUwJ4cM6+8yC4A21LolQFco7gvJEtWgOtCMujtVPkPCDBuRTNMuerpLX7aES+gZw37RsaPQYNKrU+5yPUnHOrk+B8otVgDBoAqyfqKgPvYOhIE2iicHAHRLMhT7ByjH2Hqjkn66eYky5aezp654SwSLT7sxK24Jw4YU/6c/azeY9u+08AwA1TlwDwsG+hb0mqFfbzCiI135/0Z7hx44vYu0uCcJTWPkQQzmUUsRnSSi0oVBlTvUmDc+YYNwOOHPF78W/5o7F43NNV//pRjCJdXHU8bTKRroNLAznugsxAzvkcky0/qSBcn9DayX2arAPIMw0Jwq9/opxotNHaSc15bqw9OwfltTBGgrU1/qwtyLmwhXAVcMk7gA3n2N/gEM8ba99jfZ2uZY0ogOcBl4dfRONvTmAsBa3POb8OQZ2/67vO8wRQDhIAfPnaXTLZ4YkkR9YvqUDbBAXN82UnLRS2B/xzEdZP1lVwzs9BtNWkgnAd1HRae9RAI2ZrU3eQqmdIg3OJnPO5JZIkoyDnjgTZuAhMLM+9SZHztGdn4Rl7VD9h7zl/6wr4HtAndb+y5tzhh8VTjPFRQ4LO0hGU3pVE1/rI+jw4b47J5LkMziuAhEPFuL3mXATRuUq0iSDVE/tZNsBEzBLsCUL8cL9ln0s6TPhUmHh2tuRqWFNroaXm3IWca2J/fJ2MbBFXXg7OJXoctazPq+U5WqmZfgYNzocIAqI752Ty+YTW3nch523dR8yoP+RAzv20h1WYk7/P1DJdh8GKnNsE4Y7WnCNJEtRqbLB/5zvfwdOfzno6n3zyydizp1o06qiNaHQRHtLnvJOF+NrNLDv6hZ/eifd881bsP2hQzkZGzo3g3IGcU6sFhRaEFlKtXThrbIgxtXZXxt8wV5untGcXhLOgyQtooWNDzimVL8wxXbdPgZKEyAYXci5aqRlq7XlmX+gBGZwnXmynrnPrFQ7asWnDas4NiztKLGWVNy9p7TWN1s6Q806dZTpX5YaOAcBo+6LdFX2ZsydiiZxbgvP+onp+ltrn3EKry+EjFo+LCFQJ5DyIDFo7R+vS1HgONuE+QmvfKzLt5maW5yVHUjMHFXAyn7MizMEyae205jwII/geczhlrbVAqwQaKZHzgNHaZd2jxx0OWnNpQSmMXvPSRkTOAaBRU9+xYlyNgzUTdYWcDzPB6BHOFw3ms0QJwon7IJ14QoV3rYFJd2TkPCoS7f5YkXPaS1ucrzlnOS29N3U8AGDvfK+y/lI/fqoSZ82ViGO25pRp7ctopSbMdAJHCs7Z9wrtkAQhWk0y33nQIVg5uXBoiwiTjQi45J3A5TdisPK04ed3mASZvI1awenHV
BCuFJzzpKOkqPuBWrf7bYKcq/OejDxtXNx42Y3Aac8qJxrFZ+k+GpL1xUCQ7+mWEyU1JFhb4583afPEZLLWmWyxB+c+Fb6iRvYnz/PQikNcHn4Zfm8O+PknjT7nRqLIxaQQZim9mO8kSmBPlIlZmCtdZ3BuOOQCbRa0dkRYO1lX6wVHzgUrgAbnS0WtRGsXCHs3yRTCJ3ybxpRaL8UzFfND/G/SzrXg3KHWHvDPWLQGGh4Jvly+mDG+hFL7BcfO4FUXHYe6R0TGAr4OudYDPpdqSNDvLqPc0GF50oPHWZbN1rhkGEik1BGc5/BwR7HBXnMuSxlViYpAzj3aOYH/PfMC3Ny2+ES3f0svpxJ7nY3WHtZJuzwjYQIWrMoySGKangAfRzXPcnwLrV1alVq7SWvPs5JWEPbfon4epu3Sm3N2p/CLrCwIZ4ARuVEaWtCktETOV5eOvclTvuV0nB9FzrktOzg/7bTT8KEPfQg//OEPccUVV+DJT2atcnbv3o2ZGXfW96gtwzRae3UrtYXUR4fXVh8zHuDpZ65Hk2fdimGOKaAWBj8q1/+NipyTgEWqtZdo7b7bwTOtKjjXaO1d9TrAHNyzfwPfOeGPAABLGR/eWp9ztVlNxCiJ0Qhb45FFzg+BdQ+zn5MUhOvpdMPBkvPYYkPtOuhQwuy09vtWcw4AjZ5aDGe8BawSwblHg3N2zMPjJwIAprND5e8+fI91jEjkXFBCh9HaLVROs1cxwBDgWFDviXO4F2zdiWLd6RAoe2bS2m2BEKG17y74+ZisgGFtVhzo7DpYEhtgwUJtxCnhG8i5F0RYN8nm985DHeZUiOSAuJ8ack4YN1GT13CTDU0EJhpy3rf/PLTmnATndTWGV07Q4LymkPNhJpKGwsGlzlqugvODSwP5GgAu1iQcCccamPZQjCh2FSLREgMz3gJyM1gT63QQqvM1g3P+XFpjzDndv2ihtbss62vzvRYZwXnSBT75dPWeuDk6cm4GhtQpNIMxsQflgjLOBdwQYHyMrEE8OBdri0cCqclmxPaEqU3wRtkbqDPNg3N/QgTnquY8MBJoAVL4HkHO/UglbXIigEjWzskatL1iKePrdAk5NxxrMwHN59N0kyXT9mfl9Tn2EqyK+BhsTDkD345I1tqSnfQcSq87xrfx/mMiMq8njzG0bwy1dIuTDYAEsAsldKszyBCJ4IQmkY350bUlpQEt6AJQ6k2dIMA6ipzz9buh0drZ82gXNZ3WHjd1hN1UldZqzgmtnf5vznMqCGcge6IXu+xhb0HWaWDtXL+MBLKgtdejAJc/8USMEfRdIue24PzRr5fPJPYSpP0jEIQTxsfAoN+VKGhjbLJMa3ckJPf6a9FFXQW8GnIuhDfZOMq9QAouBjQ453tAFNVUsp3avVex/9dyv074ksYenyNga7lvC84Fch5L4WNqLYoAcz9ea48nzEJrlxa3rMF5y+tjsUPmdVEAH3sC8C/P0t9Iux0tDQnOu3Oq9Mcwr8gIcs7OpzCZgmZCTvMZLK3UuG32FFg0HaU6cm5D+63I+dGac7znPe/Bhz/8YTzucY/DC17wApx55pkAgK9+9auS7v7LtL//+7/Hli1bUK/Xcf755+Oqq676pX/n/7pZkXOS9fc8uQB28lBmsn7zvLX4uxecjcmA12A1dATNahqtfUr/2wiq4iXknAg1AcCgIMi5s1bOMFdwbqJcsq2GcK5awDP+Hju3PBsA0E5FcK4+s7ikFpRmkDup51dOPEX9svER7qBX0CNL9ccdt7PEqWidLLAu6sKsCEJlK7WVIzn5M4liuGyqtWUtc0yzujzLeri2ET1RY7Swm9GkBL10743W4yeilZpQm7XS2tsqmLRQ+gpLoJsVAUHOA6aiuvpUpE/4U7zx0pMQ1xzBeWo8Y1utG0HOdxf8fMws9DCE1YGcu+q4QmSoB/a/meajKKm1b1rBAoXtBzu6AyORc3Y+KQK95lwEGJQpYwt8NbV2ipxb+pzXyLEIq6FRV89kkvRbXjNRV60Oh5lIGorzp9eapUoQTtLaLeKZrjUw7SNNRgvOI6Ta/ZnBAgo6tvPcnkQtBefsvk6Ms3G4d75XWX+p2aBDHJ0Z1CJ+D8X+sPMq4J4fkJNuOSm1g7ox78y6V4qkm8GzuK+FjpxvXDmFqEYCEX5MIc7o50JZO2TIOTcvGDE4F9TtLnMEaxMsSFzspxgUJb4TszzD1nAWl/pXq+uiiSmxz5GgdyLW97WO+HFozbm+Zvs8mTNej7Bmoo6DRfk5x0gxE5IEs4Nh1hXrsCs4X257TmM9OzckLKioUaZR03Hc4i3uTK2DsdVKANEQeuwMUoKck/3UmJvWci5xvrQTjQjwMyUIt26yQQTheM15RATh+Bq9mNfQLSHn5H1mbezM8aTmnK8B4jmJeVJCzmkrNT3hkPG1QbYgtdByrYGcaSatnQfntdBHHPoYD4jGTuFA8R/3VuCJb5fnWENi3X9HNr6WJf0eWhwo8mtj8n7JoNURnO8MNwMAqTl3C8KJ9/ge4BNauxjbjXoNC7EjkQQAZ1/G/hd7neHHyc4TYl5rtHZVc55aGJDNgoxrfr6xTWOpCjmPWlYhZYAJrsmStv4CsPta6/ukDUPO8wRBat8nvUKxBkXNeW4k5v2uHpx72QCZEJcUe5ZFAHSTr+b0RJjJMeO0pFtKOH7nziGU/QehLTs4f9zjHofZ2VnMzs7i4x//uHz9Fa94BT70oQ/drydn2uc+9zm8/vWvx9vf/nb8/Oc/x5lnnolLL70U+/fbkakHrVlbqRExG0A6fv0iROLpmdsJHpx3Y+5sVdacVwjCVWz2OR86NV/vcy57yYpWarLm/P6itdsE4UTPS7Y4j9fZ97TTQP87gJvvVQtUWKR2dPvN25FNbVG/P/4P3edONxizH+eQmvN2FhwBcq4WxP+8cQ/e8uUbUFQg57mFMn6Kt03+vLmmFuOY1Jzn3Ala8sdVsDq3gwnn/f15bLE9eIf9vHvsfseVyPliJXJeWJCCFD4iUXPuBcDDfwv4nSvxjIvOw2suJs4TtwZHDUp9zq3IeSHv7b5Cr1mUNgw5p5lkV09eYiEylWwYYja19s0zJDiXVMuaclr5PcwKEznnztmqk9XxxGuUMq71OXfUnIvx2CDBOUH+xht18rKaQzMte/uZkkUtNfdkAoGcY56qmvP2gDkrgproE+TcWXPeZa3uRrAa9JrzGW8BBUWl+/MOWrvhbPB1a3KSjcN9C322no+iySEEKuEBjSnUOK1d7hOUwnzsxWxuOerxstqUjtIuCzkXyDO716JX9buefbaOYvDgXNQaB7mqD6bBuT8Kcp6nysnjz7PWmmBiowDaA8d+laf4bvB7eG7IkxZBpJ8jP2bqxxKtG48h98as8FRwbiYaXcH56lMBAIc2XAwAqEc+Nkw1cBDleRIjwbRPRLIcSey2EEo7Ug0B04z17Ezcrv/NXMN4nT8AEpwba2JYV+PICM67SaYE9uieYATnXVPvhZ7TIjmHbMDWbdJKbd1kXe9hD6XWTmntC3lN31+jpmy5ZkXONz5c
Pdu+EZy7aO1UEM4IiEVLLRmoWhJoI4nlmrR2jiLXIh8oCvi2siOTAiyvQwXnZmnIsozf/2TQUyhorILzUPgZjoTknogJ0yUSOSfjQyLnQoCSPbPJRqTT2sW6FER44vkOjQYAWHUiPy4/T4PWnsngXIj+2ZFzW8vQJu1EJIJzW+tgSys1aQ5aOwDU8x4bq/T8hdl86GHIOXgQbjMNOedsLQM5D/t6cB5jgINLfE4IFqKltz1Fzse8nt5C0GZprzTXJID0ELIj6nMeBAGmp/WHv2XLFqxeXZGhuh/sfe97H17+8pfjpS99KU499VR86EMfQrPZ1JIE1Pr9PhYWFrR/DwqrorULJ4kvsANECqngA7bls0m+GPBnVIWcUwroxPqRT3FfbQsALrqltVKzI+eM1j4iUubqy5t09QVMONxikeAO13idfc9iwpEUsqDeuossIFlSDqA5vT8/5nwAwDW184Etj3YukNoGQzfKL78C+N677J/hTsMAEYoKNkFaBEjNKUqu/31X3I7PXrUTuWhr01xRqjkvLC3gTvF3yp83hOqcQ6JMnHNxj0VvTAXntM/wwbvsKD6AHlfpFO2VrM7kgNDaLci5Z0GhMwSIhUCTzZk3EihjnBZYCs5dyDm/ngOi3dGgrW/Gtno0QCUF6DmP0IomRKbU54eYj5yh3/KFAJt4cL7jEAnOa+MELSa09jhQAZV4HmtIna+13V2FWrvI2AungDoD5Nk89pR15HU1/8PALzuJNqOZdhtynidYWQc+HL0PL8A3sNBLdfFME+0yLe3L8TqKFYRNscJbhEdYAujN67R2cb+TLnDnd1T9HF+vpifZte1f7CEvMBq1XQTnzRWAH5DgnO8TQgPigt8BXvwVzrKKrFRpL6zpc8FEzqtqzsWzyzMtQNqwclJ/rjxQE2uLSNjlfixpxAAqae2LRQNJwMenCPj4/PKiBmZ4cmbRFcuY898XVFU+HuV6HEonuxUUhC4dKid4qCAcv2eX/Rtw6btxy5lvA8BEyTZMN3CwIM/YY/tTDYlqpVafdCJlS9kw5HyZVuSar3FKTpKteVZGzmdJ4kc8L3MPCGtqPSfBeZLlSLJC1f2HdZWEMPwTtyDcgCSnwALfe38mfx0gwLqpBhH7WwQ+dxnO+O5vwEfOg3O2DiymMToFCYijhlJ1TzKVfBC29oxyKzXhE4zUSk0PvjPeSq6JrvXvABAXIwTnDrX2Whg49+fSd4n5yl9veAnqy+miYxq//3nSU3XXcaucdIpbyMvqPpiNNwCA8n1oAshAzkUAP9mINLV2Oh+f97iz3QxFwTJz0drF/mGltbNr6zkE4RqpnkQGCHuBWhWtPbLcN3HqHqk7N/2NVaeUPzBCcO4yipyn/HyKgR6cx322N3Z9tkbWkGD/Ql/vS24JzteSEtKwN6RzC8B9HcNvOkprZ/bFL34Rz33uc3HBBRfgnHPO0f79smwwGOCaa67BE5/4RPma7/t44hOfiCuvvNL6mXe/+92YnJyU/4455phf2vndrzaszzkgB+MAIWp17jDwxUJkKw97IwTnVGX29GfjytPfgb2FmXUrL6C74i0AgNjX6X/y50wE52xRZLT2EYNzF+potl0pIedmcK7XnG+bXcL+Od2xLwXn3LmaOOlxuLD3t3hN/gb2uuvco5ZyfOlGuetq4LpP2T/DA6k+Ivhiwzd7IYMFVQMzI0gcgMNLAzTRQ5ARxVcT2WmUF0NqqwN1zj7dmDoiOG+pGmyqlrw0q8aOwbgYDNj9ViiJpSSACsJZ6pB8i/BdBl/2LLbWZRrBnhAWKUxBOFfNOb+egyB/p3XnLuRcOKNahn848hB4uWICDLG3RJ/Byq+/VL3gh9i8gj3r7QeXlMNYGyslCzIEduScBue2wNDV5zxPid6DQPym1N/Js1k1ToIZw8kIRmnxRY8r3k+p91mK+p3/iUuDq/GO6JNMFE6rOefzy5UsSXsYLCM4p/2dZzAPj64f3TmyThPkfO8NwKeeDfztmfxcRHA+Cc8DkqzA4c7A/gzMpODCbva/EFqTgnB8Xgh13hXHqs94njXh6YWxPhdMyiFd85w15wb7yA91Bgs/Tz9PMF4LJCpSrzfheWpf8SoExhbRwKDG9yRR1yjGX9TETIt930LfsW+UErD6HirW7UERSjaHXyhKcoKQUZ2B8lpmE4QDGPL6yN/BosfmaD0KsGGqgS7ImBdompeiVfDxWZtw7jWLIji/v5BzQAs2Nqfb9NfN+lPKyjCTEsKCWDGhSHAu6oJDre6f3wtjbvYKV3Ce6Oj97G3AP14if41rTYzVSJu8pQPALV/F+J4rcaq3jdecs++ay2K9bCxSNefdQaono0QZRInWbqCqJeSc0NqN+Zf47NrrhTs4D0cJzku0do6ch757zTO/S1wHX6+aQab3mjZN9IB3WU0E533FDCDIuTqPhlUUVAA/Nqq4Umvn/mXOg/NmrLc7E3uAH2G8UUO3vsY8EvO5RFJZ0tr18VyIZ25tpab0M2zBfy2jviZ7LvbgXCDnluR/3ORjseyDt0AU201/w9YFw2CyLNv4+YnEkikGG+U8WRGw51/zEtYmVNaNe0oI0mXDqPeAvYvOUUE44O/+7u/w0pe+FGvWrMG1116L8847DzMzM7j77rvxlKc8ZfgBjtBmZ2eRZRnWrNEn2Zo1a7B3717rZ97ylrdgfn5e/tu5c6f1fQ84s/U5p2gQIAdjv4jQEOq4fLGoF2ySHBAUulHV2qM6bl//TOwoDAaExbFbABcPMWrOFXLOzlvUAS6rlZorCEqNmnNTrZ1vOhOc1r4gkXN2jV+/YbdeV50Nyg4I30C2rhzDLqzC3sUES/2UObi2DGYYq81uxP7lIjgfFBGimC8qK08CXv5fwMlPk2/L4ZcXfR4kFUWBuW6CUwVFfWwNc0p8X20oAHxLppLaDEitDrm3Pl/IF9DCbliQ8/md6rkbYnmDATvHUCDnwwThLMh5aKHVZQgQSeTcsnQZm3+DOwa5uenZkgVFLsdJt6ghjfgmQqntrprzMY6y9BcYivittwH//V77e4lFSNX1DLFH+Lejduc31At+JGntOw511djTkHPmHMg+5+vPZsHusYxqi9Uku24r73D1OQeA77+LOboSOZ8i52Y4trbXAYTxCBuqhpwL6r2RYCNB3sHD83oi0xRxMi3pYdAfreYcgKx1BoDYy+DROrsepbWH7mw+X7eieksGlnsXerzMwAOmt6r3rjcS3qXgnF2fZ9LaZ47XP2dZw/2wRpAMT0+E+JF2X90157nusAax7iiJuZ2l2DKl5me9YdybCuR8oWghlcE5d97E2h83JXK+0HfT2vXvivRr4HOnVyjknCUdVF2rRM7NwHj2NuBfXwTs4ACBkUzp8dZWjZgh5zabwBLqKZ+/9Un7PgNg0SVKd19M7LVFgWZuIH0mck7LmPg4n503dGnCmmJg0OCc3z+pVh2456ZTrf2u7wE/+lv1O+kU8pH0qZjmZSIyOCcsl9XenEZrZ8E5FcQ0aO3UJjaoawPKQIkM3CrU2o21IPHYsYSvZpufYX4EtHZ
ClTdH/T2dmp/W7UqFHDPmZinY0bN+KBBx7QZc15rdYXH3/8MVKplHbdCTo7O7F+/XqXRkWqIZ1O46qrrsKxxx6Lww47TPv5l770JUyaNAnjx4/Hu+++ixtuuAEbNmzAU0895eJoSSHmz5+PNWvWYPr06dixYwdWrVqF448/HuvWrcPOnTsRCoXQ2tqq+5vOzk7s3LnTnQGTsnn66aexb98+LF++XPsZr9MRgEqIS9xwww0qgKL/3n//fd3fbNu2TfX5fOovfvGLkvs/99xz1eOOO86p4ZMCVHJcBT/96U/VQCCgDg4OqqqqqieddJJ68cUX67b561//qgJQ//a3vzn+XkiGSo7pBx98oE6bNk1dsWJFyf3zWq1dtm/frgJQX3vtNd3Pv/GNb6jz5s1zaVSkGi655BJ10qRJ6rZt24pu9+KLL6oA1I0bNw7TyEg17N27V21paVEfeeQR9bHHHlNDoVDeNnPnzlWvv/56F0ZHKuHkk09WTz311KLb8Dr1HsycE9e49tprdauBZkydOlX3/erVq9He3m4pwzZ//nw8//zz1QyRVEAlx1Uwf/58JJNJbN68GdOnT8fYsWPz3GXF96y3Gj7KPaYffvghFi9ejGOOOQYPP/xwyf3zWq1dRo8eDb/fb3od8hqsPy677DI888wz+OMf/6hTn5kxf/58ABkFzLRp04ZjeKQKWltbcfDBB2Pjxo046aSTEI/HsW/fPl32nNdt/bBlyxa88MILJTPivE69B4Nz4hodHR15xhfFUFUVq1evxnnnnYdgMFhy+7Vr12LcuHHVDJFUQLnHVWbt2rXw+XyaA/SCBQtw0003IZFIaMf8+eefx/Tp0ylpH0bKOabbt2/H4sWLMXv2bKxevRo+X2lrE16rtUsoFMLs2bPx4osvYtmyZQAysugXX3wRl112mbuDI5ZRVRWXX345fvWrX+Gll17KKxcyY+3atQDAa7NO6Ovrw6ZNm3Duuedi9uzZCAaDePHFF/HZz34WALBhwwZs3boVCxYscHmkxAqrV6/GmDFjcMoppxTdjtep92BwTuqG3//+9/if//kfXHjhhXm/+9nPfoZQKIRZs2YBAJ566ik8+uijeOSRR4Z7mMQir7/+Ot58800sXrwYzc3NeP3113H11Vfjy1/+shZ4f+lLX8KqVauwYsUK3HDDDVi3bh3uv/9+fP/733d59MSM7du3Y9GiRZg0aRLuvvtu7Nq1S/udyNbwWq0/rrnmGpx//vmYM2cO5s2bh/vuuw/79+83NeUktcmll16Kxx9/HL/+9a/R3Nys1R3HYjE0NDRg06ZNePzxx/HpT38a7e3tePfdd3H11Vdj4cKFOOKII1wePTHjuuuuw2c+8xlMmjQJH374IW699Vb4/X588YtfRCwWw4oVK3DNNdegra0NLS0tuPzyy7FgwQI6tdcB6XQaq1evxvnnn69rj8frdITgtq6eEKt88YtfVI855hjT361Zs0adMWOGGo1G1ZaWFnXevHnqk08+OcwjJOXw9ttvq/Pnz1djsZgaiUTUGTNmqN/5zne0enPBO++8ox533HFqOBxWJ0yYoN51110ujZiUYvXq1QVr0gW8VuuTBx54QJ04caIaCoXUefPmqW+88YbbQyJlUOi6XL16taqqqrp161Z14cKFaltbmxoOh9UDDzxQ/cY3vqF2d3e7O3BSkM9//vPquHHj1FAopE6YMEH9/Oc/r6s7HhgYUL/+9a+ro0aNUqPRqHrGGWeoO3bscHHExCq//e1vVQDqhg0bdD/ndToyUFRVVV1ZFSCEEEIIIYQQQggA9jknhBBCCCGEEEJch8E5IYQQQgghhBDiMgzOCSGEEEIIIYQQl2FwTgghhBBCCCGEuAyDc0IIIYQQQgghxGUYnBNCCCGEEEIIIS7D4JwQQgghhBBCCHEZBueEEEIIIYQQQojLMDgnhBBCDCxatAhXXXVVzezHjOXLl2PZsmVV7WPy5MlQFAWKomDfvn0Ft1uzZg1aW1urei2vIT43fi6EEELsgsE5IYQQUiUvvfSSaYD71FNP4dvf/rb2/eTJk3HfffcN7+BKcNttt2HHjh2IxWJuD8V11qxZA0VRsHTpUt3P9+3bB0VR8NJLL2k/27FjR80dS0IIIfUNg3NCCCHEIdra2tDc3Oz2MIrS3NyMsWPHQlEUt4eCRCLh9hAQCATwwgsv4A9/+EPR7caOHcsFDUIIIbbC4JwQQggpwb/9279hzpw5WiD7pS99Cf/85z8BAJs3b8bixYsBAKNGjYKiKFi+fDkAvax90aJF2LJlC66++mpNEg0A3/rWt/CJT3xC93r33XcfJk+erH2fSqVwzTXXoLW1Fe3t7bj++uuhqqrub9LpNO68805MmTIFDQ0NOPLII/GLX/yiove7Zs0aTJw4EdFoFGeccQZ2796dt82vf/1rHHXUUYhEIpg6dSpWrVqFZDKp/X79+vU47rjjEIlEcOihh+KFF16Aoih4+umntc9NURQ88cQTOOGEExCJRPDYY48BAB555BHMmDEDkUgEhxxyCH70ox/pXnvbtm343Oc+h9bWVrS1teH000/H5s2btd+/9NJLmDdvHhobG9Ha2opjjz0WW7ZssfTeGxsbccEFF2DlypVlfmqEEEJIdTA4J4QQQkqQSCTw7W9/G++88w6efvppbN68WQvAu7q68Mtf/hIAsGHDBuzYsQP3339/3j6eeuopHHDAAZqMfMeOHZZf/5577sGaNWvw6KOP4pVXXsGePXvwq1/9SrfNnXfeiZ///Od48MEH8de//hVXX301vvzlL+Pll18u672++eabWLFiBS677DKsXbsWixcvxu23367b5k9/+hPOO+88XHnllfjb3/6Ghx56CGvWrMEdd9wBILOYsGzZMkSjUbz55pt4+OGHcdNNN5m+3sqVK3HllVfi/fffx5IlS/DYY4/hlltuwR133IH3338f3/nOd3DzzTfjZz/7GYDMsViyZAmam5vxpz/9Ca+++iqampqwdOlSxONxJJNJLFu2DCeccALeffddvP7667j44ovLUgZ861vfwnvvvVfx4gYhhBBSCQG3B0AIIYTUOhdccIH29dSpU/GDH/wAc+fORV9fH5qamtDW1gYAGDNmTEGDsLa2Nvj9fi37Xg733XcfbrzxRpx55pkAgAcffBC//e1vtd8PDQ3hO9/5Dl544QUsWLBAG+crr7yChx56CCeccILl17r//vuxdOlSXH/99QCAgw8+GK+99hqee+45bZtVq1Zh5cqVOP/887XX+va3v43rr78et956K55//nls2rQJL730kvZe77jjDpx00kl5r3fVVVdp7wsAbr31Vtxzzz3az6ZMmaItAJx//vl44oknkE6n8cgjj2gB9+rVq9Ha2oqXXnoJc+bMQXd3N0499VRMmzYNADBjxgzL7x8Axo8fjyuvvBI33XRT1aZ7hBBCiFWYOSeEEEJK8Pbbb+Mzn/kMJk6ciObmZi3Y3bp1q+Ov3d3djR07dmD+/PnazwKBAObMmaN9v3HjRvT39+Okk05CU1OT9u/nP/85Nm3a
VNbrvf/++7rXAqAF/IJ33nkHt912m+61LrroIuzYsQP9/f3YsGEDurq6dIsQ8+bNM309+X3s378fmzZtwooVK3T7vv3227X38c4772Djxo1obm7Wft/W1obBwUFs2rQJbW1tWL58OZYsWYLPfOYzuP/++8tSKQhuuOEG7Nq1C48++mjZf0sIIYRUAjPnhBBCSBH279+PJUuWaJLrjo4ObN26FUuWLEE8Hq96/z6fL69+vFxjtL6+PgDAs88+iwkTJuh+Fw6HqxtggddbtWqVLuMtiEQiZe2rsbFRt18A+MlPfpK3QOD3+7VtZs+erdWny3R0dADIZNKvuOIKPPfcc3jiiSfwzW9+E88//zyOPvpoy+NqbW3FjTfeiFWrVuHUU08t6z0RQgghlcDgnBBCCCnC+vXrsXv3btx1113o6uoCAPzlL3/RbRMKhQBkaq2LEQqF8rbp6OjAzp07oaqqJtNeu3at9vtYLIZx48bhzTffxMKFCwEAyWQSb7/9No466igAwKGHHopwOIytW7eWJWE3Y8aMGXjzzTd1P3vjjTd03x911FHYsGEDDjzwQNN9TJ8+Hdu2bcNHH32Ezs5OAMBbb71V8rU7Ozsxfvx4/OMf/8A555xjus1RRx2FJ554AmPGjEFLS0vBfc2aNQuzZs3CjTfeiAULFuDxxx8vKzgHgMsvvxw/+MEPTD0ECCGEELthcE4IIYQUYeLEiQiFQnjggQdwySWXYN26dbre5QAwadIkKIqCZ555Bp/+9KfR0NCApqamvH1NnjwZf/zjH/GFL3wB4XAYo0ePxqJFi7Br1y5897vfxVlnnYXnnnsO//mf/6kLPK+88krcddddOOigg3DIIYfg3nvv1fVUb25uxnXXXYerr74a6XQaxx13HLq7u/Hqq6+ipaVFqw23whVXXIFjjz0Wd999N04//XT89re/1dWbA8Att9yCU089FRMnTsRZZ50Fn8+Hd955B+vWrcPtt9+Ok046CdOmTcP555+P7373u+jt7cU3v/lNAChpzLZq1SpcccUViMViWLp0KYaGhvCXv/wFe/fuxTXXXINzzjkH3/ve93D66afjtttuwwEHHIAtW7bgqaeewvXXX49EIoGHH34Yp512GsaPH48NGzbg73//O8477zzLn4EgEolg1apVuPTSS8v+W0IIIaRcWHNOCCGEFKGjowNr1qzBk08+iUMPPRR33XUX7r77bt02EyZM0EzSOjs7cdlll5nu67bbbsPmzZsxbdo0TYI9Y8YM/OhHP8IPf/hDHHnkkfjzn/+M6667Tvd31157Lc4991ycf/75WLBgAZqbm3HGGWfotvn2t7+Nm2++GXfeeSdmzJiBpUuX4tlnn8WUKVPKer9HH300fvKTn+D+++/HkUceid/97ndaYC1YsmQJnnnmGfzud7/D3LlzcfTRR+P73/8+Jk2aBCAjQX/66afR19eHuXPn4sILL9Tc2kvJ3i+88EI88sgjWL16NQ4//HCccMIJWLNmjfY+otEo/vjHP2LixIk488wzMWPGDKxYsQKDg4NoaWlBNBrF+vXr8dnPfhYHH3wwLr74Ylx66aX46le/WtbnIDj//PMxderUiv6WEEIIKQdFNRa6EUIIIWREMHnyZFx11VVaL3YnefXVV3Hcccdh48aNmot6vbNmzRpcddVVOhUDIYQQUikMzgkhhJARyuTJk7Fjxw4Eg0Fs374dsVjMtn3/6le/QlNTEw466CBs3LgRV155JUaNGoVXXnnFttdwk6amJiSTSUQiEQbnhBBCbIE154QQQsgI5eWXX9ac4Zubm23dd29vL2644QZs3boVo0ePxoknnoh77rnH1tcol5kzZ2LLli2mv3vooYcKmtCZIUz7hIs8IYQQUi3MnBNCCCFkRLBly5aCbeo6OzttX6AghBBCyoHBOSGEEEIIIYQQ4jJ0ayeEEEIIIYQQQlyGwTkhhBBCCCGEEOIyDM4JIYQQQgghhBCXYXBOCCGEEEIIIYS4DINzQgghhBBCCCHEZRicE0IIIYQQQgghLsPgnBBCCCGEEEIIcZn/C1AELa27wlJPAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# Part 3: Make a plot that contains results from each case\n", + "# --------------------------------------------------------\n", + "print(\"Let's plot!\")\n", + "print(\"--------------------------------------\")\n", + "\n", + "# set up the figure\n", + "fig = plt.figure(figsize=(12, 4))\n", + "ax = plt.subplot(1, 1, 1)\n", + "\n", + "# loop over cases\n", + "for k, v in tas_arrays.items():\n", + " v.plot(ax=ax, label=k)\n", + "\n", + "# add legend\n", + "plt.legend()\n", + "\n", + "# add title\n", + "plt.title(\"Zonal Mean Surface Air Temperature Anomaly\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "d97c51b5-4c3a-4242-913e-efba9b05d38d", + "metadata": {}, + "outputs": [], + "source": [ + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "f0dc40a5-ae0a-4cbd-bd25-f55462cfa46b", + "metadata": {}, + "source": [ + "## Save the plots in work directory " + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "63556ef5-07dd-4cad-a050-08bbd9c15d4a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Last log message by example_multicase POD: finished successfully!\n" + ] + }, + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "# save the plot in the right location\n", + "work_dir = os.environ[\"WORK_DIR\"]\n", + "assert os.path.isdir(f\"{work_dir}/model/PS\"), f'Assertion error: {work_dir}/model/PS not found'\n", + "\n", + "plt.savefig(f\"{work_dir}/model/PS/example_model_plot.eps\", bbox_inches=\"tight\")\n", + "\n", + "# Part 4: Close the catalog files and\n", + "# release variable dict reference for garbage collection\n", + "# ------------------------------------------------------\n", + "cat.close()\n", + "tas_dict = None\n", + "\n", + "\n", + "# Part 5: Confirm POD executed successfully\n", + "# ----------------------------------------\n", + "print(\"Last log message by example_multicase POD: finished successfully!\")\n", + "sys.exit(0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec735c6a-6761-47cd-92e4-6904e7e939eb", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "intake", + "language": "python", + "name": "intake" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diagnostics/example_multicase/multirun_config_demo1.jsonc b/diagnostics/example_multicase/multirun_config_demo1.jsonc new file mode 100644 index 000000000..b7fa95752 --- /dev/null +++ b/diagnostics/example_multicase/multirun_config_demo1.jsonc @@ -0,0 +1,89 @@ +{ + "pod_list" : [ + "example_multicase" + ], + "case_list": + { + "c384L65_am5f3b1r0_amip": + { + "model" : "test", + "convention" : "CMIP", + "startdate" : 19810101, + "enddate" : 19821231 + }, + "c384L65_am5f3b1r0_amip": + { + "model" : "test", + "convention" : "CMIP", + "startdate" : 19810101, + "enddate" : 19821231 + } + }, + // PATHS --------------------------------------------------------------------- + // Location of supporting data downloaded when the framework was installed. + // If a relative path is given, it's resolved relative to the MDTF-diagnostics + // code directory. Environment variables (eg, $HOME) can be referenced with a + // "$" and will be expended to their current values when the framework runs. + // Full path to model data ESM-intake catalog header file + "DATA_CATALOG":"/home/a1r/github/MDTF-diagnostics/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json", + + // Backwards compatibility + "MODEL_DATA_ROOT": "../mdtf_test_data", + + // Parent directory containing observational data used by individual PODs. + "OBS_DATA_ROOT": "../inputdata/obs_data", + + // Working directory. + "WORK_DIR": "../wkdir", + + // Directory to write output. The results of each run of the framework will be + // put in a subdirectory of this directory. Defaults to WORKING_DIR if blank. + "OUTPUT_DIR": "../wkdir", + + // Location of the Anaconda/miniconda or micromamba installation to use for managing + // dependencies (path returned by running `[conda | micromamba] info`.) If empty, + // framework will attempt to determine location of system's conda installation. + "conda_root": "~/.local/bin", + + // Directory containing the framework-specific conda environments. This should + // be equal to the "--env_dir" flag passed to conda_env_setup.sh. 
diff --git a/diagnostics/example_multicase/multirun_config_demo1.jsonc b/diagnostics/example_multicase/multirun_config_demo1.jsonc
new file mode 100644
index 000000000..b7fa95752
--- /dev/null
+++ b/diagnostics/example_multicase/multirun_config_demo1.jsonc
@@ -0,0 +1,89 @@
+{
+  "pod_list" : [
+    "example_multicase"
+  ],
+  "case_list":
+  {
+    "c384L65_am5f3b1r0_amip":
+    {
+      "model" : "test",
+      "convention" : "CMIP",
+      "startdate" : 19810101,
+      "enddate" : 19821231
+    },
+    "c384L65_am5f3b1r0_amip":
+    {
+      "model" : "test",
+      "convention" : "CMIP",
+      "startdate" : 19810101,
+      "enddate" : 19821231
+    }
+  },
+  // PATHS ---------------------------------------------------------------------
+  // Location of supporting data downloaded when the framework was installed.
+  // If a relative path is given, it's resolved relative to the MDTF-diagnostics
+  // code directory. Environment variables (e.g., $HOME) can be referenced with a
+  // "$" and will be expanded to their current values when the framework runs.
+  // Full path to model data ESM-intake catalog header file
+  "DATA_CATALOG": "/home/a1r/github/MDTF-diagnostics/diagnostics/example_multicase/c384L65_am5f3b1r0_amip.json",
+
+  // Backwards compatibility
+  "MODEL_DATA_ROOT": "../mdtf_test_data",
+
+  // Parent directory containing observational data used by individual PODs.
+  "OBS_DATA_ROOT": "../inputdata/obs_data",
+
+  // Working directory.
+  "WORK_DIR": "../wkdir",
+
+  // Directory to write output. The results of each run of the framework will be
+  // put in a subdirectory of this directory. Defaults to WORK_DIR if blank.
+  "OUTPUT_DIR": "../wkdir",
+
+  // Location of the Anaconda/miniconda or micromamba installation to use for managing
+  // dependencies (path returned by running `[conda | micromamba] info`.) If empty,
+  // framework will attempt to determine location of system's conda installation.
+  "conda_root": "~/.local/bin",
+
+  // Directory containing the framework-specific conda environments. This should
+  // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
+  // blank, the framework will look for its environments in conda_root/envs
+  "conda_env_root": "~/miniconda3/envs",
+
+  // Path to micromamba executable if using micromamba
+  "micromamba_exe": "",
+
+  // SETTINGS ------------------------------------------------------------------
+  // Any command-line option recognized by the mdtf script
+  // can be set here, in the form "flag name": "desired setting".
+
+  // Settings affecting what output is generated:
+  // Set to true to run the preprocessor; default true:
+  "run_pp": true,
+
+  // Set to true to perform data translation; default false:
+  "translate_data": true,
+
+  // Set to true to have PODs save postscript figures in addition to bitmaps.
+  "save_ps": false,
+
+  // Set to true for files > 4 GB
+  "large_file": false,
+
+  // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs
+  // run to completion
+  "save_pp_data": true,
+
+  // Set to true to save HTML and bitmap plots in a .tar file.
+  "make_variab_tar": false,
+
+  // Generate html output for multiple figures per case
+  "make_multicase_figure_html": false,
+
+  // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved
+  // under a unique name.
+  "overwrite": false,
+
+  // List with custom preprocessing script(s) to run on data
+  // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository
+  "user_pp_scripts" : []
+}
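One thing to watch in multirun_config_demo1.jsonc above: its case_list repeats the key "c384L65_am5f3b1r0_amip", and JSON object keys must be unique, so a plain load silently keeps only one of the two entries. A hedged sketch of a pre-flight check (the comment stripping is simplified and assumes no "//" inside string values):

```python
# Sketch only; flags duplicate case names in a JSONC runtime config.
import json
import re

def load_jsonc_checked(path):
    with open(path) as f:
        # drop whole-line // comments before parsing as JSON
        text = re.sub(r"^\s*//.*$", "", f.read(), flags=re.MULTILINE)

    def reject_duplicates(pairs):
        keys = [k for k, _ in pairs]
        dupes = {k for k in keys if keys.count(k) > 1}
        if dupes:
            raise ValueError(f"duplicate keys: {dupes}")
        return dict(pairs)

    return json.loads(text, object_pairs_hook=reject_duplicates)

# raises ValueError: duplicate keys: {'c384L65_am5f3b1r0_amip'}
load_jsonc_checked("diagnostics/example_multicase/multirun_config_demo1.jsonc")
```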
diff --git a/diagnostics/example_multicase/multirun_config_demo2.jsonc b/diagnostics/example_multicase/multirun_config_demo2.jsonc
new file mode 100644
index 000000000..5e2517b49
--- /dev/null
+++ b/diagnostics/example_multicase/multirun_config_demo2.jsonc
@@ -0,0 +1,90 @@
+{
+  "pod_list" : [
+    "example_multicase"
+  ],
+  "case_list" :
+  {
+    "c384L65_am5f3b1r0_amip":
+    {
+      "model" : "test",
+      "convention" : "CMIP",
+      "startdate" : 19810101,
+      "enddate" : 19821231
+    },
+    "c96L65_am5f3b1r0_pdclim1850F":
+    {
+      "model" : "test",
+      "convention" : "CMIP",
+      "startdate" : "00050101",
+      "enddate" : "00061231"
+    }
+  },
+  // PATHS ---------------------------------------------------------------------
+  // Location of supporting data downloaded when the framework was installed.
+  // If a relative path is given, it's resolved relative to the MDTF-diagnostics
+  // code directory. Environment variables (e.g., $HOME) can be referenced with a
+  // "$" and will be expanded to their current values when the framework runs.
+  // Full path to model data ESM-intake catalog header file
+  "DATA_CATALOG": "/home/a1r/github/aparna/MDTF-diagnostics/diagnostics/example_multicase/amip_c96L65_am5f3b1r0_pdclim1850F_combined.json",
+
+  // Backwards compatibility
+  "MODEL_DATA_ROOT": "../mdtf_test_data",
+
+  // Parent directory containing observational data used by individual PODs.
+  "OBS_DATA_ROOT": "../inputdata/obs_data",
+
+  // Working directory.
+  "WORK_DIR": "../wkdir",
+
+  // Directory to write output. The results of each run of the framework will be
+  // put in a subdirectory of this directory. Defaults to WORK_DIR if blank.
+  "OUTPUT_DIR": "../wkdir",
+
+  // Location of the Anaconda/miniconda or micromamba installation to use for managing
+  // dependencies (path returned by running `[conda | micromamba] info`.) If empty,
+  // framework will attempt to determine location of system's conda installation.
+  "conda_root": "~/.local/bin",
+
+  // Directory containing the framework-specific conda environments. This should
+  // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
+  // blank, the framework will look for its environments in conda_root/envs
+  "conda_env_root": "~/miniconda3/envs",
+
+  // Path to micromamba executable if using micromamba
+  "micromamba_exe": "",
+
+  // SETTINGS ------------------------------------------------------------------
+  // Any command-line option recognized by the mdtf script
+  // can be set here, in the form "flag name": "desired setting".
+
+  // Settings affecting what output is generated:
+  // Set to true to run the preprocessor; default true:
+  "run_pp": true,
+
+  // Set to true to perform data translation; default false:
+  "translate_data": true,
+
+  // Set to true to have PODs save postscript figures in addition to bitmaps.
+  "save_ps": false,
+
+  // Set to true for files > 4 GB
+  "large_file": false,
+
+  // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs
+  // run to completion
+  "save_pp_data": true,
+
+  // Set to true to save HTML and bitmap plots in a .tar file.
+  "make_variab_tar": false,
+
+  // Generate html output for multiple figures per case
+  "make_multicase_figure_html": false,
+
+  // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved
+  // under a unique name.
+  "overwrite": false,
+
+  // List with custom preprocessing script(s) to run on data
+  // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository
+  "user_pp_scripts" : []
+}
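The template diff below makes the same change the demo configs already reflect: case_list becomes a mapping keyed by case name, and the FIRSTYR/LASTYR integers become startdate/enddate strings. Code that consumed the old list of CASENAME records then iterates a dict instead; a sketch, with values copied from the template:

```python
# Sketch only: consuming the new dict-style case_list.
case_list = {
    "CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231": {
        "model": "test",
        "convention": "CMIP",
        "startdate": "19800101",
        "enddate": "19841231",
    },
}

for case_name, case in case_list.items():
    # the mapping key replaces the old CASENAME field;
    # YYYYMMDD strings replace the FIRSTYR/LASTYR integers
    print(case_name, case["startdate"], case["enddate"])
```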
- "WORKING_DIR": "../wkdir", + "WORK_DIR": "../wkdir", // Directory to write output. The results of each run of the framework will be // put in a subdirectory of this directory. Defaults to WORKING_DIR if blank. @@ -65,56 +68,47 @@ // Location of the Anaconda/miniconda or micromamba installation to use for managing // dependencies (path returned by running `[conda | micromamba] info`.) If empty, // framework will attempt to determine location of system's conda installation. - "conda_root": "", + "conda_root": "/net/jml/miniconda3", // Directory containing the framework-specific conda environments. This should // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left - // blank, the framework will look for its environments in the system default - // location. - "conda_env_root": "", + // blank, the framework will look for its environments in conda_root/envs + "conda_env_root": "/net/jml/miniconda3/envs", // Path to micromamba executable if using micromamba "micromamba_exe": "", + // SETTINGS ------------------------------------------------------------------ // Any command-line option recognized by the mdtf script (type `mdtf --help`) // can be set here, in the form "flag name": "desired setting". - // - // Type of data for the framework to process: `single_run` (default) - // for PODs that compare output from a single simulation to observational data - // or `multi_run` for PODs that analyze output from multiple simulations and/or - // observational datasets - "data_type": "multi_run", - - // Method used to fetch model data. - "data_manager": "Local_File", - - // Method used to manage dependencies. - "environment_manager": "Conda", // Settings affecting what output is generated: + // Set to true to run the preprocessor; default true: + "run_pp": true, + + // Set to true to perform data translation; default false: + "translate_data": true, // Set to true to have PODs save postscript figures in addition to bitmaps. "save_ps": false, - // Set to true to have PODs save netCDF files of processed data. - "save_nc": false, + // Set to true for files > 4 GB + "large_file": false, + + // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs + // run to completion + "save_pp_data": true, // Set to true to save HTML and bitmap plots in a .tar file. "make_variab_tar": false, + // Generate html output for multiple figures per case + "make_multicase_figure_html": false, + // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved // under a unique name. "overwrite": false, - - // Settings used in debugging: - - // Log verbosity level. - "verbose": 1, - - // Set to true for framework test. Data is fetched but PODs are not run. - "test_mode": false, - - // Set to true for framework test. No external commands are run and no remote - // data is copied. Implies test_mode. 
- "dry_run": false + // List with custom preprocessing script(s) to run on data + // Place these scripts in the user_scripts directory of your copy of the MDTF-diagnostics repository + "user_pp_scripts" : ["example_pp_script.py"] } diff --git a/diagnostics/example_multicase/settings.jsonc b/diagnostics/example_multicase/settings.jsonc index f4f456bcb..13325ff6f 100644 --- a/diagnostics/example_multicase/settings.jsonc +++ b/diagnostics/example_multicase/settings.jsonc @@ -17,7 +17,7 @@ "description" : "Example diagnostic with multiple cases", "driver" : "example_multicase.py", "long_name" : "Example diagnostic with multiple cases", - "realm" : "atmos", + "convention": "cmip", "runtime_requirements": { "python3": ["matplotlib", "xarray", "netCDF4"] } @@ -25,8 +25,16 @@ // Variable Coordinates "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"} }, @@ -34,6 +42,7 @@ "varlist" : { "tas": { "frequency" : "day", + "realm": "atmos", "dimensions": ["time", "lat", "lon"], "modifier": "atmos_height", "standard_name" : "air_temperature", diff --git a/diagnostics/example_notebook/Test_Notebook.ipynb b/diagnostics/example_notebook/Test_Notebook.ipynb new file mode 100644 index 000000000..e9816becc --- /dev/null +++ b/diagnostics/example_notebook/Test_Notebook.ipynb @@ -0,0 +1,210 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "eed42503-3bd5-4804-96b3-83e685e46038", + "metadata": {}, + "source": [ + "# Test Notebook" + ] + }, + { + "cell_type": "markdown", + "id": "cc759e47-853d-4ddf-8395-ea977bf95625", + "metadata": {}, + "source": [ + "Tests the core MAR functionality." 
diff --git a/diagnostics/example_notebook/Test_Notebook.ipynb b/diagnostics/example_notebook/Test_Notebook.ipynb
new file mode 100644
index 000000000..e9816becc
--- /dev/null
+++ b/diagnostics/example_notebook/Test_Notebook.ipynb
@@ -0,0 +1,210 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "eed42503-3bd5-4804-96b3-83e685e46038",
+   "metadata": {},
+   "source": [
+    "# Test Notebook"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cc759e47-853d-4ddf-8395-ea977bf95625",
+   "metadata": {},
+   "source": [
+    "Tests the core MAR functionality."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5de0d861",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "9a00156b-27c4-4edc-b006-8b358ca52954",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import os\n",
+    "import yaml"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7d1c2d10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "case_env_file = os.environ['case_env_file']\n",
+    "with open(case_env_file, 'r') as stream:\n",
+    "    try:\n",
+    "        case_info = yaml.safe_load(stream)\n",
+    "    except yaml.YAMLError as exc:\n",
+    "        print(exc)\n",
+    "\n",
+    "\n",
+    "cat_def_file = case_info['CATALOG_FILE']\n",
+    "case_list = case_info['CASE_LIST']\n",
+    "# all cases share variable names and dimension coords, so just get first result for each\n",
+    "tas_var = [case['tas_var'] for case in case_list.values()][0]\n",
+    "time_coord = [case['time_coord'] for case in case_list.values()][0]\n",
+    "lat_coord = [case['lat_coord'] for case in case_list.values()][0]\n",
+    "lon_coord = [case['lon_coord'] for case in case_list.values()][0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "06350542-190e-4e11-9868-39ac187acf10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config = {\n",
+    "    \"dora_id\": \"odiv-2\",\n",
+    "    \"pathPP\": \"/path/to/some/experiment\"\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "06d87e68-d4c5-4e85-b036-93da28e88d37",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for k, v in config.items():\n",
+    "    config[k] = (\n",
+    "        os.environ[f\"MAR_{k.upper()}\"]\n",
+    "        if f\"MAR_{k.upper()}\" in os.environ.keys()\n",
+    "        else v\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "41b887a3-977a-43b7-991f-4f84bad93c99",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'dora_id': 'odiv-2', 'pathPP': '/path/to/some/experiment'}\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(config)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "f5f3f13c-c770-43ec-a3a0-47e70d49489a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x = np.linspace(1,100)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "51b5e7f6-941f-472e-98b2-a8413e9186e7",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Text(0.02, 0.9, 'odiv-2')"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
"iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAABV0UlEQVR4nO3dd3QV5f7+/fdO2+mBENIghNB7SEClKaIIIoII0sJRPCqPhVAOYMEGNsCGiqDH4/FgI4AFBBQLqDRRgRR6J0CAhNDSIH3fzx9+3T8jNZCwU67XWrMWM3PP7M/cbJnLuWf2WIwxBhEREZEKxMnRBYiIiIj8nQKKiIiIVDgKKCIiIlLhKKCIiIhIhaOAIiIiIhWOAoqIiIhUOAooIiIiUuEooIiIiEiF4+LoAi6HzWbjyJEj+Pj4YLFYHF2OiIiIXAJjDNnZ2YSGhuLkdOFrJJUyoBw5coSwsDBHlyEiIiKXISUlhbp1616wTaUMKD4+PsAfB+jr6+vgakRERORSZGVlERYWZj+PX0ilDCh/Duv4+voqoIiIiFQyl3J7hm6SFRERkQpHAUVEREQqHAUUERERqXAUUERERKTCUUApY/v378disZCUlOToUuQKrFixAovFQkZGhqNLERGplqpVQLn33nt54oknynR//fr1u+ztV6xYQUhICMYYbrzxRsaOHXtFtZTlsZWXDz/8kA4dOji6jIvq1KkTqamp+Pn5ObqUK/6eiYhURpXyMePLYbPZ+Oabb1i8eLGjS7FbvHgxffv2veJfw62Ix3Y+ixcv5o477nB0GRdUWFiIm5sbwcHBji5FRKT6MpVQZmamAUxmZuYlb7Nq1SoTGBhoiouLTXJysgHM3LlzTceOHY3VajUtWrQwP//8s719UVGRue+++0z9+vWNu7u7adKkiXnzzTft6ydNmmSAEtPPP/9s3/eXX35pbrzxRuPh4WHatGlj1q5de1ZNDRs2NF9//bUZPnz4WftKTk42xhizYsUKc8011xg3NzcTHBxsHn/8cVNYWHjeY8vPzzcjR440wcHBxmq1mvDwcDNlyhR72wMHDpi+ffsaLy8v4+PjYwYOHGjS0tJKHFdkZKT54IMPTFhYmPHy8jIPPfSQKSoqMi+//LIJCgoytWvXNi+++GKJGjIyMsyIESNM7dq1jY+Pj+nWrZtJSkoq0SY3N9d4eXmZLVu2GGOMyc/PN48++qgJDQ01np6e5tprr7X/HeTm5poWLVqYESNG2Lfft2+f8fX1Nf/5z3+MMcbMnj3b+Pn5mYULF5rGjRsbq9Vqunfvbg4ePFjicxcvXmyio6ON1Wo1ERERZvLkySX6EDDvvvuu6du3r/H09DTPPvus+fnnnw1gTp06VeKzlixZYpo0aWI8PDzMgAEDTE5Ojvnwww9NeHi4qVGjhomNjTVFRUX2fV/oGP+63++++840a9bMeHl5mZ49e5ojR45c8HsmIlIZleb8XW0CyoQJE8z9999vjDH2EFG3bl3zxRdfmG3btpkHHnjA+Pj4mOPHjxtjjCkoKDDPPvusWbdundm3b5/59NNPjaenp5k/f74xxpjs7GwzaNAgc+utt5rU1FSTmppq8vPz7ftu1qyZ+frrr83OnTvNXXfdZcLDw0ucFLds2WK8vLxMbm6uycjIMB07djQjRoyw76uoqMgcOnTIeHp6mkceecRs377dLFy40AQEBJhJkyad99heffVVExYWZlatWmX2799vVq9ebeLi4owxxthsNhMVFWW6dOliNmzYYH777TcTHR1tunbtat/XpEmTjLe3t7nrrrvM1q1bzeLFi42bm5vp2bOnGTVqlNmxY4f53//+ZwDz66+/2vfbuXNn06dPH7N+/Xqza9cuM378eFOrVi1z4sQJ+76//vpr07BhQ/t8TEyM6dSpk1m1apXZs2ePefXVV43VajW7du0yxhiTmJho3NzczMKFC01RUZHp3LmzueOOO+zbz54927i6upr27dubtWvXmg0bNphrr73WdOrUyd7mu+++M76+vubDDz80e/fuNT/88IOpX7++mTx5sr0NYAIDA80HH3xg9u7da/bv33/OgOLq6mpuueUWk5CQYFauXGlq1aplevToYQYNGmS2bt1qlixZYtzc3My8efMu+Rj/3G/37t3N+vXrTXx8vGnevLmJiYm54PdMRKQyKreAMmXKFNO+fXvj7e1tateube644w6zY8eOEm1sNpuZNGmSCQkJMe7u7qZr1672/2P+U15enomNjTW1atUynp6epk+fPiYlJeWS67icgNKkSROzePFiY8z/CyjTpk2zry8sLDR169Y1L7/88nn38cgjj5gBAwbY54cPH17ihPnXff/3v/+1L9u6dasBzPbt2+3LXnrpJdO/f3/7fNeuXc2YMWNK7OvJJ580TZs2NTabzb5s1qxZxtvb2xQXF5/z2EaNGmVuuummEtv86YcffjDOzs4lrjD8Wdu6deuMMX8EFE9PT5OVlWVv07NnT1O/fv0Sn9m0aVMzdepUY4wxP/74o/H19TV5eXklPq9hw4bmvffes8+PGDHCjBs3zhhjzJ49e4zFYjGHDx8usc3NN99sJk6caJ9/5ZVXTEBAgBk1apQJDg42x44ds6+bPXu2Acxvv/1mX7Z9+3YDmN9//90YY8z1119f4gqSMcZ88sknJiQkxD4PmLFjx5Zoc66AApg9e/bY2zz44IPG09PTZGdnl+irBx988JKP8Vz7nTVrlgkKCrLPn+t7JiJSGZXm/F2qm2RXrlzJyJEj+e2331i2bBlFRUX06NGD06dP29u88sorTJ8+nZkzZ7J+/XqCg4O55ZZbyM7OtrcZO3YsCxcuZN68eaxZs4acnBxuv/12iouLS1POJdu+fTuHDh2ie/fuJZZ37NjR/mcXFxfat2/P9u3b7cv+/e9/0759e2rXro23tzfvv/8+Bw8evKTPbNOmjf3PISEhAKSnp9uXLVq0iL59+1607o4dO5a4R6Vz587k5ORw6NChcx7bvffeS1JSEk2bNmX06NH88MMPJfYXFhZW4kWLLVq0oEaNGiWOu379+iXekxAUFESLFi1KvHkyKCjIfjzx8fHk5ORQq1YtvL297VNycjJ79+4F/niD5ZIlS+zHnJCQgDGGJk2alNhm5cqV9m0Axo8fT9OmTXn77beZPXs2AQEBJfroz7+3PzVr1qzE8cTHx/P888+X+IwRI0aQmprKmTNn7Nv9dR/n4+npScOGDUv0Qf369fH29j5nv1zqMf59vyEhISW+KyIi1VGpbpL97rvvSszPnj2bwMBA4uPjueGGGzDG8Oabb/LUU0/Rv39/AD766COCgoKIi4vjwQcfJDMzkw8++IBPPvnEflL99NNPCQsLY/ny5fTs2bOMDu3/Wbx4MbfccgseHh4XbftnGPjss8/417/+xeuvv07Hjh3x8fHh1Vdf5ffff7+kz3R1dT1rnzabDYC0tDQSEhLo3bv3BfdhjDnrBlpjTIl9/v3YoqOjSU5O5ttvv2X58uUMGjSI7t2788UXX5xzf+f6nL/W/udnnWvZn8djs9
kICQlhxYoVZ+27Ro0aAKxbt46CggK6dOli38bZ2Zn4+HicnZ1LbPPXE356ejo7d+7E2dmZ3bt3c+utt571Gec6pr/2+XPPPWf/Pv6Vu7u7/c9eXl5nrf+7y+mXSznGc+3jz79nEZHq6oqe4snMzATA398fgOTkZNLS0ujRo4e9jdVqpWvXrqxdu5YHH3yQ+Ph4CgsLS7QJDQ2lVatWrF279pwBJT8/n/z8fPt8VlZWqepctGgRDzzwwFnLf/vtN2644QYAioqKiI+PJzY2FoDVq1fTqVMnHnnkEXv7v/5fL4Cbm9tlXfVZvHgxHTt2LHE14Fz7atGiBV9++WWJALF27Vp8fHyoU6fOeY/N19eXwYMHM3jwYO666y5uvfVWTp48SYsWLTh48CApKSn2qyjbtm0jMzOT5s2bl/o4/hQdHU1aWhouLi7Ur1//nG0WLVpE79697SfqqKgoiouLSU9P5/rrrz/vvu+77z5atWrFiBEjuP/++7n55ptp0aKFfX1RUREbNmzg2muvBWDnzp1kZGTQrFkze207d+6kUaNGl318l+tSj/FiLvd7JiJyOfIKi3luyVai69VkYPuwi29QTi77d1CMMYwbN44uXbrQqlUr4I8rA/DHZe6/CgoKsq9LS0vDzc2NmjVrnrfN302dOhU/Pz/79NchiotJT09n/fr13H777WetmzVrFgsXLmTHjh2MHDmSU6dOcd999wHQqFEjNmzYwPfff8+uXbt45plnWL9+fYnt69evz6ZNm9i5cyfHjx+nsLDwkmo616O29evX5/fff2f//v0cP34cm83GI488QkpKCqNGjWLHjh0sWrSISZMmMW7cOJycnM55bG+88Qbz5s1jx44d7Nq1i88//5zg4GBq1KhB9+7dadOmDcOGDSMhIYF169Zxzz330LVr10sa4jif7t2707FjR/r168f333/P/v37Wbt2LU8//TQbNmw45zE3adKEYcOGcc8997BgwQKSk5NZv349L7/8MkuXLgX++Pv59ddf+fjjj4mJieGuu+5i2LBhFBQU2Pfj6urKqFGj+P3330lISOCf//wnHTp0sAeWZ599lo8//pjJkyezdetWtm/fzvz583n66acv+3gv1aUc46W43O+ZiEhp7UnPod+sX5i7LoXJi7eScabg4huVk8sOKLGxsWzatIm5c+eete5cwxIX+62PC7WZOHEimZmZ9iklJeWS61yyZAnXXXcdgYGBZ62bNm0aL7/8MpGRkaxevZpFixbZr2o89NBD9O/fn8GDB3Pddddx4sSJEldTAEaMGEHTpk3t96n88ssvF63n9OnT/Pjjj2fdfzJhwgScnZ1p0aIFtWvX5uDBg9SpU4elS5eybt06IiMjeeihh7j//vvtJ9dzHZu3tzcvv/wy7du355prrmH//v0sXboUJycnLBYLX331FTVr1uSGG26ge/fuNGjQgPnz519yf56LxWJh6dKl3HDDDdx33300adKEIUOGsH//foKCgti7dy979uw56+rY7Nmzueeee+z3mfTt25fff/+dsLAwduzYwaOPPso777xjD6SzZs0iIyODZ555xr4PT09PHn/8cWJiYujYsSMeHh7MmzfPvr5nz558/fXXLFu2jGuuuYYOHTowffp0wsPDr+iYL9WFjvFSXc73TESktL6MP0Sft9ewIy2bAG8r793dnhqebo4r6HLuwo2NjTV169Y1+/btK7F87969BjAJCQkllvft29fcc889xpg/nvgAzMmTJ0u0adOmjXn22Wcv6fNLcxdwnz59znoy588nbRITEy/p88rSl19+aZo3b14m+zrXsVVEr7/+uunVq1eZ7/fP3xAREZHLdzq/0Iz/LMmEP/61CX/8azP0P7+ao1m55fJZ5fYUjzGG2NhYFixYwE8//URERESJ9REREQQHB7Ns2TL7soKCAlauXEmnTp0AaNeuHa6uriXapKamsmXLFnubstSlSxeGDh1a5vu9XH9e4SgLFe3Yzqdu3bpMnDjR0WWIiMjf7DqazR0zf+GL+EM4WeBf3Zvwyf3XEejjfvGNy1mpbpIdOXIkcXFxLFq0CB8fH/s9I35+fnh4eGCxWBg7dixTpkyhcePGNG7cmClTpuDp6UlMTIy97f3338/48eOpVasW/v7+TJgwgdatW5/1GHBZeOyxx8p8n1firzcHX6mKdmznM2jQIEeXICIif2GM4bMNKUxavJW8QhuBPlbeGhJFx4a1HF2ancWYS3+e8Xz3iMyePZt7770X+OOgn3vuOd577z1OnTrFddddx6xZs+w30gLk5eXx6KOPEhcXR25uLjfffHOJew0uJisrCz8/PzIzM/H19b3U8kVERKq9nPwinl64ma+SjgBwfeMA3hjclgBva7l/dmnO36UKKBWFAoqIiEjpbTuSRWxcAvuOn8bZycK4W5rwcNeGODld2UtrL1Vpzt/V5m3GIiIi1ZUxhrh1B3luyTYKimwE+7rzdkwU19T3d3Rp56WAIiIiUoVl5xXyxILNfLMpFYBuTWvz+qC2+Hs58BHiS6CAIiIiUkVtOZzJyLgEDpw4g4uThcdubcoDXRpctSGdK3HZP9RW1a1YsQKLxUJGRgYAH374of29MiIiIhWZMYYPf0mm/ztrOXDiDHVqePDZQx35/264evebXCkFlEs0ePBgdu3adUX7mDp1Ktdccw0+Pj4EBgbSr18/du7cWUYVioiIQGZuIQ9/msDkJdsoKLZxS4sglo6+nuh6NS++cQWigHKJPDw8zvlz+aWxcuVKRo4cyW+//cayZcsoKiqiR48enD59uoyqFBGR6iwpJYPeM1bz3dY0XJ0tPHt7C/5zdzv8PF0vvnEFU20CSn5+PqNHjyYwMBB3d3e6dOlS4uV/S5cupUmTJnh4eNCtWzf2799fYvu/DvHs3LkTi8XCjh07SrSZPn069evX53xPbn/33Xfce++9tGzZksjISGbPns3BgweJj48v02MVEZHqxRjDf1fvY+C/13LoVC71/D358uFO3Ncl4qLvwquoqk1Aeeyxx/jyyy/56KOPSEhIoFGjRvTs2ZOTJ0+SkpJC//79ue2220hKSuKBBx7giSeeOO++mjZtSrt27ZgzZ06J5XFxccTExFzylyEzMxMAf/+K+5iXiIhUbBlnChjx8QZe/GY7hcWG21oH8/XoLrSpW8PRpV2RavEUz+nTp3n33Xf58MMP6dWrFwDvv/8+y5Yt44MPPuDUqVM0aNCAN954A4vFQtOmTdm8efMF35kzbNgwZs6cyQsvvADArl27iI+P5+OPP76kmowxjBs3ji5dupT4lV0REZFLFX/gJKPiEjmSmYebixPP9G7OPzqEV9qrJn9VLa6g7N27l8LCQjp37mxf5urqyrXXXsv27dvZvn07HTp0KPEX2rFjxwvuc8iQIRw4cIDffvsNgDlz5tC2bVtatGjB6tWr8fb2tk9/v9ICEBsby6ZNm5g7d24ZHaWIiFQXNpvh3RV7GfTebxzJzCMiwIuFj3Ti7o71q0Q4gWpyBeXPe0L+/pdmjMFisZz3npELCQkJoVu3b
sTFxdGhQwfmzp3Lgw8+CED79u1JSkqytw0KCiqx7ahRo1i8eDGrVq2ibt26pf5sERGpvk7k5DP+842s2HkMgL6RoUzp3xpva9U6pVeLKyiNGjXCzc2NNWvW2JcVFhayYcMGmjdvTosWLexXQv709/lzGTZsGPPnz+fXX39l7969DBkyBPjjiZ9GjRrZJx8fH+CPQBQbG8uCBQv46aefiIiIKMOjFBGRqu73fSe4bcZqVuw8htXFian9W/PWkLZVLpxANbmC4uXlxcMPP8yjjz6Kv78/9erV45VXXuHMmTPcf//9ZGdn8/rrrzNu3DgefPBB4uPj+fDDDy+63/79+/Pwww/z8MMP061bN+rUqXPB9iNHjiQuLo5Fixbh4+NDWloaAH5+fnh4eJTFoYqISBVUbDO8u2IP05ftwmagYW0vZg2Lpllw1X1hbrW4ggIwbdo0BgwYwN133010dDR79uzh+++/p2bNmtSrV48vv/ySJUuWEBkZyb///W+mTJly0X36+vrSp08fNm7cyLBhwy7a/t133yUzM5Mbb7yRkJAQ+zR//vyyOEQREamCjmXnM/x/63jthz/CSf/oOiyO7VKlwwmAxVzODRgOVprXNYuIiFRWa/ccZ8z8JI5l5+Ph6szzd7RkYPswR5d12Upz/q4WQzwiIiKVSbHN8NaPu3n7p90YA02CvJkVE03jIB9Hl3bVKKCIiIhUIEez8hgzL5Hf9p0EYHD7MCb3bYmHm7ODK7u6FFBEREQqiFW7jvGv+UmcOF2Ap5szU+5sTb+oCz+AUVUpoIiIiDhYUbGNN5bv4p0VezEGmgX7MGtYNA1rezu6NIdRQBEREXGg1MxcRs9NZP3+UwAMu64ez9zeAnfX6jWk83cKKCIiIg7y8450xn2WxKkzhXhbXZjavzV9IkMdXVaFoIAiIiJylRUW23jt+528t2ofAC1DfZkVE039AC8HV1ZxKKCIiIhcRYdOnWHU3EQSD2YAcG+n+ky8rRlWl+o9pPN3CigiIiJXyQ9b03j0i01k5hbi4+7Cq3e14dZWIY4uq0JSQBERESlnBUU2pn27g//9kgxAZF0/ZsZEE+bv6eDKKi4FFBERkXKUcvIMsXEJbDyUCcADXSJ47NZmuLlUm9fhXRYFFBERkXLy7eZUHvtyE9l5Rfh5uPL6wEi6twhydFmVggKKiIhIGcsrLGbK0u18/OsBAKLr1eDtmGjq1PBwcGWVhwKKiIhIGdp//DQj4xLYeiQLgAe7NmBCj6a4OmtIpzQUUERERMrI4o1HeHLBZnLyi/D3cuP1QZF0axro6LIqJQUUERGRK5RXWMxzS7Yxd91BAK6t78+MoVEE+7k7uLLKSwFFRETkCuxJzyE2LoEdadlYLBDbrRFjbm6Mi4Z0rkipe2/VqlX06dOH0NBQLBYLX331VYn1FovlnNOrr75qb3PjjTeetX7IkCFXfDAiIiJX04KEQ/SduYYdadkEeLvx8X3XMr5HU4WTMlDqKyinT58mMjKSf/7znwwYMOCs9ampqSXmv/32W+6///6z2o4YMYLnn3/ePu/hoTubRUSkcjhTUMSkRVv5PP4QAB0b1OKtIW0J9NWQTlkpdUDp1asXvXr1Ou/64ODgEvOLFi2iW7duNGjQoMRyT0/Ps9qKiIhUdLuOZjNyTgK703NwssCYm5sQe1MjnJ0sji6tSinXa1BHjx7lm2++4f777z9r3Zw5cwgICKBly5ZMmDCB7Ozs8+4nPz+frKysEpOIiMjVZIzhs/Up9J25ht3pOdT2sfLpA9cxpntjhZNyUK43yX700Uf4+PjQv3//EsuHDRtGREQEwcHBbNmyhYkTJ7Jx40aWLVt2zv1MnTqV5557rjxLFREROa/T+UU8/dUWFiYeBuD6xgG8MbgtAd5WB1dWdVmMMeayN7ZYWLhwIf369Tvn+mbNmnHLLbfw9ttvX3A/8fHxtG/fnvj4eKKjo89an5+fT35+vn0+KyuLsLAwMjMz8fX1vdzyRURELmp7ahYj5ySw7/hpnCwwvkdTHu7aECddNSm1rKws/Pz8Lun8XW5XUFavXs3OnTuZP3/+RdtGR0fj6urK7t27zxlQrFYrVqtSqoiIXD3GGOLWHeS5JdsoKLIR7OvOjKFRXBvh7+jSqoVyCygffPAB7dq1IzIy8qJtt27dSmFhISEhIeVVjoiIyCXLzitk4oLNfL3pjydTuzWtzeuD2uLv5ebgyqqPUgeUnJwc9uzZY59PTk4mKSkJf39/6tWrB/xxCefzzz/n9ddfP2v7vXv3MmfOHG677TYCAgLYtm0b48ePJyoqis6dO1/BoYiIiFy5LYcziY1LYP+JM7g4WXjs1qY80KWBhnSuslIHlA0bNtCtWzf7/Lhx4wAYPnw4H374IQDz5s3DGMPQoUPP2t7NzY0ff/yRt956i5ycHMLCwujduzeTJk3C2dn5Mg9DRETkyhhj+PjXA7z0zXYKim3UqeHBjKFRtAuv6ejSqqUruknWUUpzk42IiMjFZOYW8sSXm/h2SxoAt7QI4tW72lDDU0M6ZalC3CQrIiJSGWxMySB2bgIpJ3NxdbYwsVdz/tm5PhaLhnQcSQFFRESqJWMM//tlP9O+3U5hsSHM34OZQ6OJDKvh6NIEBRQREamGMs4UMOHzTSzffhSAXq2CmTagDX4erg6uTP6kgCIiItVK/IFTjJ6byOGMXNycnXjm9ub8o0O4hnQqGAUUERGpFmw2w/ur9/Hq9zspshnq1/JkZkw0rer4Obo0OQcFFBERqfJOni5g/GdJ/LzzGAB9IkOZcmcrfNw1pFNRKaCIiEiVti75JKPnJpKWlYfVxYlJfVoy9NowDelUcAooIiJSJdlshndW7GH6sl3YDDSo7cWsmGiah+j3syoDBRQREalyjmXnM+6zJFbvPg5A/6g6vNCvFV5WnfYqC/1NiYhIlbJ2z3HGzE/iWHY+7q5OPH9HKwa2q6shnUpGAUVERKqEYpthxo+7mfHTboyBxoHevDMsmsZBPo4uTS6DAoqIiFR66Vl5jJmXxK/7TgAwqH1dnuvbCg83vYS2slJAERGRSm317mP8a34Sx3MK8HRz5qU7W3FnVF1HlyVXSAFFREQqpaJiG28u382sFXswBpoF+zAzJppGgd6OLk3KgAKKiIhUOqmZuYyZm8S6/ScBiLmuHs/e3gJ3Vw3pVBUKKCIiUqn8vCOdcZ8lcepMId5WF6b0b03fyFBHlyVlTAFFREQqhcJiG699v5P3Vu0DoFUdX2YOjaZ+gJeDK5PyoIAiIiIV3uGMXEbFJZBwMAOA4R3DebJ3c6wuGtKpqhRQRESkQlu27SgTPt9IZm4hPu4uvDKgDb1ahzi6LClnCigiIlIhFRTZePm7HXywJhmAyLp+zIyJJszf08GVydWggCIiIhVOyskzxMYlsPFQJgD3d4ng8Vub4ebi5ODK5GpRQBERkQrluy2pPPrFJrLzivDzcOW1gZHc0iLI0WXJVaaAIiIiFUJeYTFTl27no18PABBdrwZvx0RTp4aHgysTR1BAERERh9t//DQj4xLYeiQL
gAe7NmBCj6a4OmtIp7pSQBEREYdavPEITy7YTE5+ETU9XZk+qC3dmgU6uixxMAUUERFxiLzCYp7/ehtxvx8E4Nr6/rw1tC0hfhrSEQUUERFxgL3Hchg5J4EdadlYLPDIjQ35V/cmuGhIR/6PAoqIiFxVCxMP8dTCLZwpKCbA2403Brfl+sa1HV2WVDAKKCIiclXkFhQzafEWPttwCICODWrx1pC2BPq6O7gyqYgUUEREpNztPprNI3MS2J2eg8UCY25uzKibGuPsZHF0aVJBKaCIiEi5+nxDCs8s2kJeoY3aPlbeGtKWTg0DHF2WVHClvhtp1apV9OnTh9DQUCwWC1999VWJ9ffeey8Wi6XE1KFDhxJt8vPzGTVqFAEBAXh5edG3b18OHTp0RQciIiIVy+n8IsZ9lsSjX2wir9DG9Y0D+HbM9QoncklKHVBOnz5NZGQkM2fOPG+bW2+9ldTUVPu0dOnSEuvHjh3LwoULmTdvHmvWrCEnJ4fbb7+d4uLi0h+BiIhUODvSsug7cw0LEg7jZIFHezblo39eS4C31dGlSSVR6iGeXr160atXrwu2sVqtBAcHn3NdZmYmH3zwAZ988gndu3cH4NNPPyUsLIzly5fTs2fP0pYkIiIVhDGGeetTmLx4K/lFNoJ93ZkxNIprI/wdXZpUMuXywPmKFSsIDAykSZMmjBgxgvT0dPu6+Ph4CgsL6dGjh31ZaGgorVq1Yu3atefcX35+PllZWSUmERGpWLLzChk9L4mJCzaTX2SjW9PaLB1zvcKJXJYyv0m2V69eDBw4kPDwcJKTk3nmmWe46aabiI+Px2q1kpaWhpubGzVr1iyxXVBQEGlpaefc59SpU3nuuefKulQRESkjWw5nEhuXwP4TZ3B2svBYz6aMuL4BTnpKRy5TmQeUwYMH2//cqlUr2rdvT3h4ON988w39+/c/73bGGCyWc3+RJ06cyLhx4+zzWVlZhIWFlV3RIiJyWYwxfPrbAV74ejsFxTbq1PBgxtAo2oXXvPjGIhdQ7o8Zh4SEEB4ezu7duwEIDg6moKCAU6dOlbiKkp6eTqdOnc65D6vVitWqG6tERCqSzNxCJi7YxNLNf1z97t48iNcGtqGGp5uDK5OqoNxfenDixAlSUlIICQkBoF27dri6urJs2TJ7m9TUVLZs2XLegCIiIhXLpkMZ3P72apZuTsPV2cLTvZvz/j3tFE6kzJT6CkpOTg579uyxzycnJ5OUlIS/vz/+/v5MnjyZAQMGEBISwv79+3nyyScJCAjgzjvvBMDPz4/777+f8ePHU6tWLfz9/ZkwYQKtW7e2P9UjIiIVkzGG//2yn2nfbqew2FC3pgczY6JpG1bD0aVJFVPqgLJhwwa6detmn//z3pDhw4fz7rvvsnnzZj7++GMyMjIICQmhW7duzJ8/Hx8fH/s2b7zxBi4uLgwaNIjc3FxuvvlmPvzwQ5ydncvgkEREpDxknClgwuebWL79KAA9Wwbxyl2R+Hm4OrgyqYosxhjj6CJKKysrCz8/PzIzM/H19XV0OSIiVV78gVOMnpvI4Yxc3JydeKp3c+7pGH7ehxtEzqU052+9i0dERM7LZjO8v3ofr36/kyKbIbyWJ7NiomlVx8/RpUkVp4AiIiLndPJ0AeM/S+LnnccAuL1NCFP7t8bHXUM6Uv4UUERE5Czrkk8yem4iaVl5uLk4MblPS4ZeG6YhHblqFFBERMTOZjO8u3Iv05ftothmaBDgxaxh0TQP0f1+cnUpoIiICADHc/L51/wkVu8+DsCdUXV4sV8rvKw6VcjVp2+diIjw694TjJmXSHp2Pu6uTjzftxUD29fVkI44jAKKiEg1VmwzvP3Tbmb8uBubgcaB3swaFk2TIJ+LbyxSjhRQRESqqfTsPMbOS2Lt3hMADGxXl+fuaImnm04N4nj6FoqIVENrdh9n7PxEjucU4OnmzIv9WtE/uq6jyxKxU0AREalGioptvLl8N7NW7MEYaBbsw8yYaBoFeju6NJESFFBERKqJtMw8Rs9LZF3ySQBirqvHs7e3wN1V70GTikcBRUSkGlixM51xn23k5OkCvK0uTOnfmr6RoY4uS+S8FFBERKqwwmIbr/+wi3+v3AtAy1BfZsVEUz/Ay8GViVyYAoqISBV1OCOX0XMTiT9wCoDhHcOZeFtzDelIpaCAIiJSBS3fdpTxn28kM7cQH3cXXhnQhl6tQxxdlsglU0AREalCCopsvPLdDv67JhmAyLp+vD00mnq1PB1cmUjpKKCIiFQRKSfPEDs3kY0pGQDc1zmCJ3o1w83FybGFiVwGBRQRkSrguy1pPPrFRrLzivDzcOW1gZHc0iLI0WWJXDYFFBGRSiy/qJgp32zno18PABBVrwZvD42ibk0N6UjlpoAiIlJJ7T9+mti5CWw5nAXAg10bMKFHU1ydNaQjlZ8CiohIJfT1piM88eVmcvKLqOnpyvRBbenWLNDRZYmUGQUUEZFKJK+wmBe+3sac3w8CcE39mswYGkWIn4eDKxMpWwooIiKVxN5jOYyck8COtGwsFnjkxob8q3sTXDSkI1WQAoqISCXwVeJhnly4mTMFxdTycuONwW25oUltR5clUm4UUEREKrDcgmImL97K/A0pAHRo4M+MIVEE+ro7uDKR8qWAIiJSQe0+ms3IuAR2Hc3BYoHRNzVm9M2NcXayOLo0kXKngCIiUgF9viGFZxdtJbewmNo+Vt4a3JZOjQIcXZbIVaOAIiJSgZzOL+KZRVtYkHAYgC6NAnhjcFtq+1gdXJnI1aWAIiJSQexIy2LknAT2HjuNkwXG3dKER25shJOGdKQaUkAREXEwYwzz1qcwefFW8otsBPlamTEkiusa1HJ0aSIOo4AiIuJA2XmFPLlwC0s2HgGga5PaTB8USS1vDelI9aaAIiLiIFsOZxIbl8D+E2dwdrLwaM+m/H/XN9CQjghQ6p8fXLVqFX369CE0NBSLxcJXX31lX1dYWMjjjz9O69at8fLyIjQ0lHvuuYcjR46U2MeNN96IxWIpMQ0ZMuSKD0ZEpDIwxvDJr/vp/85a9p84Q6ifO5892IGHujZUOBH5P6UOKKdPnyYyMpKZM2eete7MmTMkJCTwzDPPkJCQwIIFC9i1axd9+/Y9q+2IESNITU21T++9997lHYGISCWSlVfIyLgEnlm0lYJiG92bB/LN6OtpF+7v6NJEKpRSD/H06tWLXr16nXOdn58fy5YtK7Hs7bff5tprr+XgwYPUq1fPvtzT05Pg4ODSfryISKW16VAGI+MSSDmZi4uThSd6NeP+LhFYLLpqIvJ35f6GqczMTCwWCzVq1CixfM6cOQQEBNCyZUsmTJhAdnb2efeRn59PVlZWiUlEpLIwxvC/NckMeHctKSdzqVvTgy8e7sQD1zdQOBE5j3K9STYvL48nnniCmJgYfH197cuHDRtGREQEwcHBbNmyhYkTJ7Jx48azrr78aerUqTz33HPlWaqISLnIPFPIo19s5IdtRwHo2TKIV+6KxM/D1cGViVRsFmOMueyNLRYWLlxIv379zlpXWFjIwIE
DOXjwICtWrCgRUP4uPj6e9u3bEx8fT3R09Fnr8/Pzyc/Pt89nZWURFhZGZmbmBfcrIuJICQdPMSoukcMZubg5O/FU7+bc0zFcV02k2srKysLPz++Szt/lcgWlsLCQQYMGkZyczE8//XTRIqKjo3F1dWX37t3nDChWqxWrVb8JICKVg81m+O+afbzy3U6KbIbwWp7MHBpN67p+ji5NpNIo84DyZzjZvXs3P//8M7VqXfyXELdu3UphYSEhISFlXY6IyFV16nQB4z/fyE870gHo3SaEaf1b4+OuIR2R0ih1QMnJyWHPnj32+eTkZJKSkvD39yc0NJS77rqLhIQEvv76a4qLi0lLSwPA398fNzc39u7dy5w5c7jtttsICAhg27ZtjB8/nqioKDp37lx2RyYicpWt33+S0XMTSc3Mw83FiUl9WhBzbT0N6YhchlLfg7JixQq6det21vLhw4czefJkIiIizrndzz//zI033khKSgr/+Mc/2LJlCzk5OYSFhdG7d28mTZqEv/+l/Q5AacawRETKm81meHflXqYv20WxzdAgwIuZMdG0CNW/TyJ/VZrz9xXdJOsoCigiUlEcz8ln3GcbWbXrGAB3RtXhxX6t8LLqTSIif+fwm2RFRKqD3/adYPTcRNKz83F3deL5vq0Y2L6uhnREyoACiohIKRXbDDN/2sNbP+7CZqBRoDfvDIumSZCPo0sTqTIUUERESiE9O49/zU/ilz0nALirXV2ev6Mlnm7651SkLOm/KBGRS/TLnuOMmZfE8Zx8PFydebFfKwa0q+voskSqJAUUEZGLKCq2MePH3bz98x6MgaZBPswaFk2jQG9HlyZSZSmgiIhcwNGsPEbNTWRd8kkAhl4bxrO3t8TDzdnBlYlUbQooIiLnsXLXMf41P4mTpwvwcnNmSv/W3NG2jqPLEqkWFFBERP6mqNjG68t28e6KvQC0CPFl1rBoIgK8HFyZSPWhgCIi8hdHMnIZPTeRDQdOAXB3h3Ce6t0cd1cN6YhcTQooIiL/58ftRxn/+UYyzhTiY3Xh5bvacFtrvcRUxBEUUESk2isosvHKdzv475pkANrU9WPm0Gjq1fJ0cGUi1ZcCiohUayknzzBqbiJJKRkA3Nc5gsd7NcXqoiEdEUdSQBGRauv7rWk8+vlGsvKK8HV34dWBkfRsGezoskQEBRQRqYbyi4qZunQHH67dD0BUvRq8PTSKujU1pCNSUSigiEi1cuDEaWLjEtl8OBOA/++GBjzasymuzk4OrkxE/koBRUSqjW82pfLEl5vIzi+ihqcr0wdFclOzIEeXJSLnoIAiIlVeXmExL36zjU9/OwhA+/CazBgaRWgNDwdXJiLno4AiIlXavmM5jIxLZHtqFgAP39iQcbc00ZCOSAWngCIiVdZXiYd5cuFmzhQUU8vLjemD29K1SW1HlyUil0ABRUSqnNyCYiYv3sr8DSkAXBfhz4yhUQT5uju4MhG5VAooIlKl7D6azci4BHYdzcFigVE3NWb0TY1w0ZCOSKWigCIiVcbnG1J4dtFWcguLCfC28taQtnRuFODoskTkMiigiEildzq/iGcWbWFBwmEAOjeqxRuD2xLooyEdkcpKAUVEKrUdaVmMnJPA3mOncbLAv7o34ZFujXB2sji6NBG5AgooIlIpGWOYvz6FSYu3kl9kI8jXyltDoujQoJajSxORMqCAIiKVTk5+EU8t3MyipCMAdG1Sm+mDIqnlbXVwZSJSVhRQRKRS2Xokk9i4RJKPn8bZycKEHk158IYGOGlIR6RKUUARkUrBGMOnvx/kha+3UVBkI8TPnbeHRtG+vr+jSxORcqCAIiIVXlZeIRO/3Mw3m1MBuLlZIK8NjKSml5uDKxOR8qKAIiIV2qZDGcTGJXLw5BlcnCw80asZ93eJwGLRkI5IVaaAIiIVkjGGD9fuZ8rS7RQWG+rU8GBmTBRR9Wo6ujQRuQpK/dvPq1atok+fPoSGhmKxWPjqq69KrDfGMHnyZEJDQ/Hw8ODGG29k69atJdrk5+czatQoAgIC8PLyom/fvhw6dOiKDkREqo7MM4U8+Ek8zy3ZRmGxoUeLIJaOvl7hRKQaKXVAOX36NJGRkcycOfOc61955RWmT5/OzJkzWb9+PcHBwdxyyy1kZ2fb24wdO5aFCxcyb9481qxZQ05ODrfffjvFxcWXfyQiUiUkHjzFbTNW88O2o7g6W5jUpwXv3d0OP09XR5cmIleRxRhjLntji4WFCxfSr18/4I+rJ6GhoYwdO5bHH38c+ONqSVBQEC+//DIPPvggmZmZ1K5dm08++YTBgwcDcOTIEcLCwli6dCk9e/a86OdmZWXh5+dHZmYmvr6+l1u+iFQgNpvhgzXJvPzdDopshnr+nsyMiaJN3RqOLk1Eykhpzt9l+nrP5ORk0tLS6NGjh32Z1Wqla9eurF27FoD4+HgKCwtLtAkNDaVVq1b2Nn+Xn59PVlZWiUlEqo5Tpwt44OMNvLR0O0U2w22tg/l6dBeFE5FqrEwDSlpaGgBBQUEllgcFBdnXpaWl4ebmRs2aNc/b5u+mTp2Kn5+ffQoLCyvLskXEgTbsP8ltM1bz04503FyceKFfK2bFROPrriEdkeqsTAPKn/7++J8x5qKPBF6ozcSJE8nMzLRPKSkpZVariDiGzWZ4Z8UeBv/nN1Iz84gI8GLhI524u0O4HiEWkbJ9zDg4OBj44ypJSEiIfXl6err9qkpwcDAFBQWcOnWqxFWU9PR0OnXqdM79Wq1WrFa9Y0OkqjiRk8+4zzayctcxAO5oG8pLd7bG26pfPhCRP5TpFZSIiAiCg4NZtmyZfVlBQQErV660h4927drh6upaok1qaipbtmw5b0ARkarjt30nuG3GalbuOobVxYmXB7TmzcFtFU5EpIRS/4uQk5PDnj177PPJyckkJSXh7+9PvXr1GDt2LFOmTKFx48Y0btyYKVOm4OnpSUxMDAB+fn7cf//9jB8/nlq1auHv78+ECRNo3bo13bt3L7sjE5EKpdhmmPXzHt5cvgubgYa1vXhnWDuaBvs4ujQRqYBKHVA2bNhAt27d7PPjxo0DYPjw4Xz44Yc89thj5Obm8sgjj3Dq1Cmuu+46fvjhB3x8/t8/Qm+88QYuLi4MGjSI3Nxcbr75Zj788EOcnZ3L4JBEpKJJz85j3PyNrNlzHIAB0XV5oV9LPN101UREzu2KfgfFUfQ7KCKVxy97jjNmXhLHc/LxcHXmhX6tuKtdXUeXJSIOUJrzt/73RUTKRbHN8NbyXbz98x6MgaZBPswaFkWjQA3piMjFKaCISJk7mpXH6LmJ/J58EoAh14QxqU9LPNw0jCsil0YBRUTK1Iqd6Yz7bCMnTxfg5ebMlP6tuaNtHUeXJSKVjAKKiJSJomIbry/bxbsr9gLQPMSXWTFRNKjt7eDKRKQyUkARkSt2JCOX0XMT2XDgFAB3dwjnqd7NcXfVkI6IXB4FFBG5Ij9uP8r4zzeScaYQH6sL0wa0oXebkItvKCJyAQooInJZCopsvPLdDv67JhmA1nX8mBkTRXgtLwdXJi
JVgQKKiJRayskzjJqbSFJKBgD/7FyfJ3o1w+qiIR0RKRsKKCJSKt9vTePRzzeSlVeEr7sLrw6MpGfLYEeXJSJVjAKKiFyS/KJipi7dwYdr9wPQNqwGbw+NIszf07GFiUiVpIAiIhd14MRpYuMS2Xw4E4AR10fwaM9muLmU6QvRRUTsFFBE5IK+2ZTKE19uIju/iBqerrw+MJKbmwc5uiwRqeIUUETknPIKi3nxm218+ttBANqH12TG0ChCa3g4uDIRqQ4UUETkLPuO5TAyLpHtqVkAPHJjQ8bd0gQXZw3piMjVoYAiIiUsSjrMkws2c7qgmFpebkwf3JauTWo7uiwRqWYUUEQEgNyCYp5bspV561MAuC7CnxlDowjydXdwZSJSHSmgiAh70rMZOSeRnUezsVhgVLdGjL65sYZ0RMRhFFBEqrkv4g/xzFdbyC0sJsDbyltD2tK5UYCjyxKRak4BRaSaOlNQxDNfbeXLhEMAdG5UizcGtyXQR0M6IuJ4Cigi1dDOtGwemRPP3mOncbLA2O5NGNmtEc5OFkeXJiICKKCIVCvGGOavT2HS4q3kF9kI8rXy1pAoOjSo5ejSRERKUEARqSZy8ot4auFmFiUdAeCGJrV5Y1AktbytDq5MRORsCigi1cDWI5mMiktk3/HTODtZGN+jCQ/d0BAnDemISAWlgCJShRlj+PT3g7zw9TYKimyE+LkzY2gU19T3d3RpIiIXpIAiUkVl5RUyccFmvtmUCsDNzQJ5bWAkNb3cHFyZiMjFKaCIVEGbD2UyMi6BgyfP4OJk4fFbm/HA9RFYLBrSEZHKQQFFpAoxxvDR2v1MWbqDgmIbdWp48HZMFNH1ajq6NBGRUlFAEakiMs8U8tiXG/l+61EAerQI4tW7IvHzdHVwZSIipaeAIlIFJB48xai5iRw6lYurs4Unb2vOvZ3qa0hHRCotBRSRSswYwwdrkpn27Q6KbIZ6/p7MjImiTd0aji5NROSKKKCIVFKnThcw4fON/LgjHYDbWgczbUAbfN01pCMilV+Zv0u9fv0/Liv/fRo5ciQA995771nrOnToUNZliFRp8QdO0nvGan7ckY6bixMv9GvFrJhohRMRqTLK/ArK+vXrKS4uts9v2bKFW265hYEDB9qX3XrrrcyePds+7+am32UQuRQ2m+G9Vft47YedFNsMEQFezIyJomWon6NLExEpU2UeUGrXrl1iftq0aTRs2JCuXbval1mtVoKDg8v6o0WqtBM5+Yz7bCMrdx0DoG9kKFP6t8bbqpFaEal6ynyI568KCgr49NNPue+++0o8TbBixQoCAwNp0qQJI0aMID09vTzLEKn0ft93gttmrGblrmNYXZyY2r81bw1pq3AiIlVWuf7r9tVXX5GRkcG9995rX9arVy8GDhxIeHg4ycnJPPPMM9x0003Ex8djtZ77rar5+fnk5+fb57OyssqzbJEKo9hmeOfnPbyxfBc2Aw1rezFrWDTNgn0dXZqISLmyGGNMee28Z8+euLm5sWTJkvO2SU1NJTw8nHnz5tG/f/9ztpk8eTLPPffcWcszMzPx9dU/1FI1HcvO51/zk1iz5zgAA6Lr8kK/lni66aqJiFROWVlZ+Pn5XdL5u9yGeA4cOMDy5ct54IEHLtguJCSE8PBwdu/efd42EydOJDMz0z6lpKSUdbkiFcove47T663VrNlzHA9XZ14bGMnrgyIVTkSk2ii3f+1mz55NYGAgvXv3vmC7EydOkJKSQkhIyHnbWK3W8w7/iFQlxTbDWz/u5u2fdmMMNA3yYWZMFI2DfBxdmojIVVUuAcVmszF79myGDx+Oi8v/+4icnBwmT57MgAEDCAkJYf/+/Tz55JMEBARw5513lkcpIpXG0aw8Rs9N5PfkkwAMuSaMSX1a4uHm7ODKRESuvnIJKMuXL+fgwYPcd999JZY7OzuzefNmPv74YzIyMggJCaFbt27Mnz8fHx/9H6JUXyt3HWPc/CROnC7Ay82ZKf1bc0fbOo4uS0TEYcr1JtnyUpqbbEQqsqJiG9OX7eKdFXsBaB7iy6yYKBrU9nZwZSIiZa8052/dcSfiIEcychk9N5ENB04B8I8O9Xi6dwvcXTWkIyKigCLiAD/tOMq4zzaScaYQb6sL0wa05vY2oY4uS0SkwlBAEbmKCottvPr9Tv6zah8Arev4MTMmivBaXg6uTESkYlFAEblKDp06Q2xcIkkpGQDc26k+E29rhtVFQzoiIn+ngCJyFXy/NY1HP99IVl4Rvu4uvHJXJLe20gszRUTORwFFpBwVFNmY+u12Zv+yH4DIsBrMHBpFmL+nYwsTEangFFBEysnBE2eInZvApkOZAIy4PoJHezbDzaVcXyIuIlIlKKCIlIOlm1N5/ItNZOcXUcPTldfuiqR7iyBHlyUiUmkooIiUobzCYl76Zjuf/HYAgHbhNZkxNIo6NTwcXJmISOWigCJSRpKPn2bknAS2pWYB8FDXhozv0QRXZw3piIiUlgKKSBlYlHSYJxds5nRBMf5ebrw+KJJuTQMdXZaISKWlgCJyBfIKi3luyVbmrksB4NoIf2YMiSLYz93BlYmIVG4KKCKXaU96DrFxCexIy8ZigdhujRhzc2NcNKQjInLFFFBELsOChEM8tXALuYXFBHhbeXNwW7o0DnB0WSIiVYYCikgpnCko4tlFW/ki/hAAnRrW4s0hbQn00ZCOiEhZUkARuUS7jmYzck4Cu9NzcLLAmJubEHtTI5ydLI4uTUSkylFAEbkIYwyfbUhh0uKt5BXaCPSx8taQKDo2rOXo0kREqiwFFJELyMkv4umFm/kq6QgA1zcO4I3BbQnwtjq4MhGRqk0BReQ8th3JIjYugX3HT+PsZGF8jyY8dENDnDSkIyJS7hRQRP7GGMOc3w/y/NfbKCiyEeLnzoyhUVxT39/RpYmIVBsKKCJ/kZ1XyMQFm/l6UyoANzUL5LWBkfh7uTm4MhGR6kUBReT/bD6USezcBA6cOIOLk4XHbm3KA10aaEhHRMQBFFCk2jPG8NHa/UxZuoOCYht1angwY2gU7cJrOro0EZFqSwFFqrXM3EIe/2IT321NA+CWFkG8dlckfp6uDq5MRKR6U0CRaispJYPYuAQOncrF1dnCxF7N+Wfn+lgsGtIREXE0BRSpdowxfLAmmWnf7qDIZgjz92Dm0Ggiw2o4ujQREfk/CihSrWScKWDC5xtZvj0dgNtaBzNtQBt83TWkIyJSkSigSLURf+Ako+ISOZKZh5uzE8/c3px/dAjXkI6ISAWkgCJVns1m+M/qfbz6/U6KbYb6tTyZGRNNqzp+ji5NRETOQwFFqrQTOfmM/3wjK3YeA6BPZChT7myFj4Z0REQqNAUUqbJ+33eC0fMSOZqVj9XFicl9WzLkmjAN6YiIVAIKKFLlFNsM7/y8hzeW78JmoEFtL2bFRNM8xNfRpYmIyCVyKusdTp48GYvFUmIKDg62rzfGMHnyZEJDQ/Hw8ODGG29k69atZV2GVFPHsvMZ/r91vL7sj3DSP6oOS2K7KJyIiFQyZR5QAFq2bElqaqp92
rx5s33dK6+8wvTp05k5cybr168nODiYW265hezs7PIoRaqRtXuO0+ut1azZcxwPV2devasN0we3xcuqC4UiIpVNufzL7eLiUuKqyZ+MMbz55ps89dRT9O/fH4CPPvqIoKAg4uLiePDBB8ujHKniim2Gt37czds/7cYYaBLkzayYaBoH+Ti6NBERuUzlcgVl9+7dhIaGEhERwZAhQ9i3bx8AycnJpKWl0aNHD3tbq9VK165dWbt27Xn3l5+fT1ZWVolJBOBoVh7D/vsbM378I5wMbh/GopFdFE5ERCq5Mg8o1113HR9//DHff/8977//PmlpaXTq1IkTJ06QlvbHC9mCgoJKbBMUFGRfdy5Tp07Fz8/PPoWFhZV12VIJrdp1jNveWs1v+07i6ebMm4Pb8vJdbfBwc3Z0aSIicoXKfIinV69e9j+3bt2ajh070rBhQz766CM6dOgAcNZjnsaYCz76OXHiRMaNG2efz8rKUkipxoqKbbyxfBfvrNiLMdA8xJdZMVE0qO3t6NJERKSMlPvdg15eXrRu3Zrdu3fTr18/ANLS0ggJCbG3SU9PP+uqyl9ZrVasVmt5lyqVQGpmLqPnJrJ+/ykAhl1Xj2dub4G7q66aiIhUJeVyD8pf5efns337dkJCQoiIiCA4OJhly5bZ1xcUFLBy5Uo6depU3qVIJffzjnRue2s16/efwtvqwsyYKF66s7XCiYhIFVTmV1AmTJhAnz59qFevHunp6bz44otkZWUxfPhwLBYLY8eOZcqUKTRu3JjGjRszZcoUPD09iYmJKetSpIooLLbx2vc7eW/VHzdbt6rjy6yYaMJreTm4MhERKS9lHlAOHTrE0KFDOX78OLVr16ZDhw789ttvhIeHA/DYY4+Rm5vLI488wqlTp7juuuv44Ycf8PHRUxdytsMZuYyKSyDhYAYA93aqz8TbmmF10VUTEZGqzGKMMY4uorSysrLw8/MjMzMTX1/9QmhVtWzbUSZ8vpHM3EJ83F149a423Noq5OIbiohIhVSa87d+YlMqnIIiG9O+3cH/fkkGIDKsBjOHRhHm7+ngykRE5GpRQJEKJeXkGWLjEth4KBOAB7pE8NitzXBzKff7uUVEpAJRQJEK49vNqTz25Say84rw83Dl9YGRdG9x/sfPRUSk6lJAEYfLKyxmytLtfPzrAQDahddkxtAo6tTwcHBlIiLiKAoo4lDJx08TG5fA1iN/vF/pwa4NmNCjKa7OGtIREanOFFDEYRZvPMKTCzaTk1+Ev5cbrw+KpFvTQEeXJSIiFYACilx1eYXFPLdkG3PXHQTg2vr+zBgaRbCfu4MrExGRikIBRa6qPek5xMYlsCMtG4sFYrs1YszNjXHRkI6IiPyFAopcNQsSDvH0V1s4U1BMgLcbbwxuy/WNazu6LBERqYAUUKTcnSkoYtKirXwefwiAjg1q8daQtgT6akhHRETOTQFFytWuo9mMnJPA7vQcnCww5uYmxN7UCGcni6NLExGRCkwBRcqFMYbP4w/x7KIt5BXaqO1jZcaQKDo2rOXo0kREpBJQQJEydzq/iKe/2sLCxMMAXN84gDcGtyXA2+rgykREpLJQQJEytT01i5FxCew7dhpnJwvjbmnCw10b4qQhHRERKQUFFCkTxhji1h3kuSXbKCiyEezrztsxUVxT39/RpYmISCWkgCJXLDuvkIkLNvP1plQAujWtzeuD2uLv5ebgykREpLJSQJErsuVwJrFxCew/cQYXJwuP3dqUB7o00JCOiIhcEQUUuSzGGD7+9QAvfbOdgmIbdWp4MGNoFO3Cazq6NBERqQIUUKTUMnMLefyLTXy3NQ2A7s2DeG1gG2p4akhHRETKhgKKlEpSSgaxcQkcOpWLq7OFib2a88/O9bFYNKQjIiJlRwFFLokxhg/WJPPydzsoLDaE+Xswc2g0kWE1HF2aiIhUQQooclEZZwqY8Pkmlm8/CkCvVsFMG9AGPw9XB1cmIiJVlQKKXFD8gVOMikvgSGYebs5OPH17c+7uEK4hHRERKVcKKHJONpvhP6v38er3Oym2GerX8mRmTDSt6vg5ujQREakGFFDkLCdPFzDusyRW7DwGQJ/IUKbc2Qofdw3piIjI1aGAIiWsSz7J6LmJpGXlYXVxYnLflgy5JkxDOiIiclUpoAjwx5DOOyv2MH3ZLmwGGtT2YlZMNM1DfB1dmoiIVEMKKMKx7HzGfZbE6t3HAegfVYcX+rXCy6qvh4iIOIbOQNXc2r3HGTMviWPZ+bi7OvHCHa0Y2D7M0WWJiEg1p4BSTRXbDG//tJsZP+7GZqBJkDezYqJpHOTj6NJEREQUUKqj9Kw8xsxL4td9JwAY1L4uz/VthYebs4MrExER+YNTWe9w6tSpXHPNNfj4+BAYGEi/fv3YuXNniTb33nsvFoulxNShQ4eyLkXOYfXuY9w2YzW/7juBp5szbwyO5JW7IhVORESkQinzKygrV65k5MiRXHPNNRQVFfHUU0/Ro0cPtm3bhpeXl73drbfeyuzZs+3zbm56E255Kiq28eby3cxasQdjoFmwD7OGRdOwtrejSxMRETlLmQeU7777rsT87NmzCQwMJD4+nhtuuMG+3Gq1EhwcXNYfL+eQmpnLmLlJrNt/EoCY6+rx7O0tcHfVVRMREamYyv0elMzMTAD8/f1LLF+xYgWBgYHUqFGDrl278tJLLxEYGHjOfeTn55Ofn2+fz8rKKr+Cq5ifd6Qz7rMkTp0pxNvqwpT+rekbGeroskRERC7IYowx5bVzYwx33HEHp06dYvXq1fbl8+fPx9vbm/DwcJKTk3nmmWcoKioiPj4eq9V61n4mT57Mc889d9byzMxMfH31Q2LnUlhs47Xvd/Leqn0AtAz1ZVZMNPUDvC6ypYiISPnIysrCz8/vks7f5RpQRo4cyTfffMOaNWuoW7fuedulpqYSHh7OvHnz6N+//1nrz3UFJSwsTAHlPA5n5DIqLoGEgxkADO8YzsTbmmtIR0REHKo0AaXchnhGjRrF4sWLWbVq1QXDCUBISAjh4eHs3r37nOutVus5r6zI2ZZtO8qEzzeSmVuIj7sLrwxoQ6/WIY4uS0REpFTKPKAYYxg1ahQLFy5kxYoVREREXHSbEydOkJKSQkiITqSXq6DIxrRvd/C/X5IBiKzrx8yYaML8PR1cmYiISOmVeUAZOXIkcXFxLFq0CB8fH9LS0gDw8/PDw8ODnJwcJk+ezIABAwgJCWH//v08+eSTBAQEcOedd5Z1OdVCyskzxMYlsPHQHzck398lgsdvbYabS5n/zI2IiMhVUeb3oFgslnMunz17Nvfeey+5ubn069ePxMREMjIyCAkJoVu3brzwwguEhV3aO2BKM4ZV1X23JZVHv9hEdl4Rfh6uvDYwkltaBDm6LBERkbM49B6Ui+UdDw8Pvv/++7L+2Gonr7CYqUu389GvBwCIrleDGUOjqFtTQzoiIlL56V08ldD+46cZGZfA1iN//B7Mg10bMKFHU1ydNaQjIiJVgwJKJbNk4xEmLthMTn4RNT1dmT6oLd2anfsH7kRERCor
BZRKIq+wmOe/3kbc7wcBuLa+P28NbUuIn4eDKxMRESl7CiiVwN5jOYyck8COtGwsFhh5YyPGdm+Mi4Z0RESkilJAqeAWJh7iqYVbOFNQTIC3G28Mbsv1jWs7uiwREZFypYBSQeUWFDNp8RY+23AIgI4NavHWkLYE+ro7uDIREZHyp4BSAe06ms3IOQnsTs/BYoExNzdm1E2NcXY692/MiIiIVDUKKBWIMYbP4w/x7KIt5BXaqO1j5a0hbenUMMDRpYmIiFxVCigVxOn8Ip75agsLEg8DcH3jAN4Y3JYAb70kUUREqh8FlApge2oWI+MS2HfsNE4WGN+jKQ93bYiThnRERKSaUkBxIGMMc9elMHnJVgqKbAT7ujNjaBTXRvg7ujQRERGHUkBxkOy8Qp5cuIUlG48AcGPT2kwf1BZ/LzcHVyYiIuJ4CigOsOVwJrFxCew/cQZnJwuP9mzK/3d9Aw3piIiI/B8FlKvIGMMnvx3gxa+3U1Bso04ND2YMjaJdeE1HlyYiIlKhKKBcJZm5hUxcsImlm9MA6N48iNcGtqGGp4Z0RERE/k4B5SrYmJJB7NwEUk7m4ups4Ylezbmvc30sFg3piIiInIsCSjkyxjD7l/1M/XY7hcWGMH8PZg6NJjKshqNLExERqdAUUMpJ5plCHv1iIz9sOwpAr1bBTBvQBj8PVwdXJiIiUvEpoJSDhIOnGBWXyOGMXNycnXj69ubc3SFcQzoiIiKXSAGlDNlshv+u2ccr3+2kyGaoX8uTmTHRtKrj5+jSREREKhUFlDJy8nQBEz7fyE870gHoExnKlDtb4eOuIR0REZHSUkApA+v3n2RUXCJpWXm4uTgxuU9Lhl4bpiEdERGRy6SAcgVsNsO7K/cyfdkuim2GBrW9mBUTTfMQX0eXJiIiUqkpoFym4zn5/Gt+Eqt3Hwfgzqg6vNivFV5WdamIiMiV0tn0Mvy69wRj5iWSnp2Pu6sTz/dtxcD2dTWkIyIiUkYUUEqh2GZ4+6fdzPhxNzYDjQO9mTUsmiZBPo4uTUREpEpRQLlE6dl5jJ2XxNq9JwC4q11dnr+jJZ5u6kIREZGyprPrJViz+zhj5ydyPKcATzdnXuzXiv7RdR1dloiISJWlgHIBRcU23ly+m1kr9mAMNAv2YWZMNI0CvR1dmoiISJWmgHIeaZl5jJ6byLr9JwEYem09JvVpgburs4MrExERqfoUUM7h553pjP9sIydPF+Dl5szUAW3oGxnq6LJERESqDQWUvygstvHaDzt5b+U+AFqG+jIzJpqIAC8HVyYiIlK9ODnyw9955x0iIiJwd3enXbt2rF692pHl8OP2o/ZwMrxjOF8+3EnhRERExAEcdgVl/vz5jB07lnfeeYfOnTvz3nvv0atXL7Zt20a9evUcUlPPlsH8o0M9OjcMoFfrEIfUICIiImAxxhhHfPB1111HdHQ07777rn1Z8+bN6devH1OnTr3gtllZWfj5+ZGZmYmvr957IyIiUhmU5vztkCGegoIC4uPj6dGjR4nlPXr0YO3atWe1z8/PJysrq8QkIiIiVZdDAsrx48cpLi4mKCioxPKgoCDS0tLOaj916lT8/PzsU1hY2NUqVURERBzAoTfJ/v3lesaYc75wb+LEiWRmZtqnlJSUq1WiiIiIOIBDbpINCAjA2dn5rKsl6enpZ11VAbBarVit1qtVnoiIiDiYQ66guLm50a5dO5YtW1Zi+bJly+jUqZMjShIREZEKxGGPGY8bN467776b9u3b07FjR/7zn/9w8OBBHnroIUeVJCIiIhWEwwLK4MGDOXHiBM8//zypqam0atWKpUuXEh4e7qiSREREpIJw2O+gXAn9DoqIiEjlU+F/B0VERETkQhRQREREpMJRQBEREZEKRwFFREREKhwFFBEREalwHPaY8ZX488EjvTRQRESk8vjzvH0pDxBXyoCSnZ0NoJcGioiIVELZ2dn4+fldsE2l/B0Um83GkSNH8PHxOefLBS9VVlYWYWFhpKSk6PdUypn6+upSf1896uurR3199ZRXXxtjyM7OJjQ0FCenC99lUimvoDg5OVG3bt0y25+vr6++7FeJ+vrqUn9fPerrq0d9ffWUR19f7MrJn3STrIiIiFQ4CigiIiJS4VTrgGK1Wpk0aRJWq9XRpVR56uurS/199aivrx719dVTEfq6Ut4kKyIiIlVbtb6CIiIiIhWTAoqIiIhUOAooIiIiUuEooIiIiEiFU60DyjvvvENERATu7u60a9eO1atXO7qkSm/q1Klcc801+Pj4EBgYSL9+/di5c2eJNsYYJk+eTGhoKB4eHtx4441s3brVQRVXDVOnTsVisTB27Fj7MvVz2Tp8+DD/+Mc/qFWrFp6enrRt25b4+Hj7evV32SgqKuLpp58mIiICDw8PGjRowPPPP4/NZrO3UV9fnlWrVtGnTx9CQ0OxWCx89dVXJdZfSr/m5+czatQoAgIC8PLyom/fvhw6dKh8CjbV1Lx584yrq6t5//33zbZt28yYMWOMl5eXOXDggKNLq9R69uxpZs+ebbZs2WKSkpJM7969Tb169UxOTo69zbRp04yPj4/58ssvzebNm83gwYNNSEiIycrKcmDllde6detM/fr1TZs2bcyYMWPsy9XPZefkyZMmPDzc3Hvvveb33383ycnJZvny5WbPnj32NurvsvHiiy+aWrVqma+//tokJyebzz//3Hh7e5s333zT3kZ9fXmWLl1qnnrqKfPll18awCxcuLDE+kvp14ceesjUqVPHLFu2zCQkJJhu3bqZyMhIU1RUVOb1VtuAcu2115qHHnqoxLJmzZqZJ554wkEVVU3p6ekGMCtXrjTGGGOz2UxwcLCZNm2avU1eXp7x8/Mz//73vx1VZqWVnZ1tGjdubJYtW2a6du1qDyjq57L1+OOPmy5dupx3vfq77PTu3dvcd999JZb179/f/OMf/zDGqK/Lyt8DyqX0a0ZGhnF1dTXz5s2ztzl8+LBxcnIy3333XZnXWC2HeAoKCoiPj6dHjx4llvfo0YO1a9c6qKqqKTMzEwB/f38AkpOTSUtLK9H3VquVrl27qu8vw8iRI+nduzfdu3cvsVz9XLYWL15M+/btGThwIIGBgURFRfH+++/b16u/y06XLl348ccf2bVrFwAbN25kzZo13HbbbYD6urxcSr/Gx8dTWFhYok1oaCitWrUql76vlC8LvFLHjx+nuLiYoKCgEsuDgoJIS0tzUFVVjzGGcePG0aVLF1q1agVg799z9f2BAweueo2V2bx580hISGD9+vVnrVM/l619+/bx7rvvMm7cOJ588knWrVvH6NGjsVqt3HPPPervMvT444+TmZlJs2bNcHZ2pri4mJdeeomhQ4cC+m6Xl0vp17S0NNzc3KhZs+ZZbcrj3FktA8qfLBZLiXljzFnL5PLFxsayadMm1qxZc9Y69f2VSUlJYcyYMfzwww+4u7uft536uWzYbDbat2/PlClTAIiKimLr1q28++673HPPPfZ26u8rN3/+fD799FPi4uJo2bIlSUlJjB07ltDQUIYPH25
vp74uH5fTr+XV99VyiCcgIABnZ+ezEl96evpZ6VEuz6hRo1i8eDE///wzdevWtS8PDg4GUN9fofj4eNLT02nXrh0uLi64uLiwcuVKZsyYgYuLi70v1c9lIyQkhBYtWpRY1rx5cw4ePAjoe12WHn30UZ544gmGDBlC69atufvuu/nXv/7F1KlTAfV1ebmUfg0ODqagoIBTp06dt01ZqpYBxc3NjXbt2rFs2bISy5ctW0anTp0cVFXVYIwhNjaWBQsW8NNPPxEREVFifUREBMHBwSX6vqCggJUrV6rvS+Hmm29m8+bNJCUl2af27dszbNgwkpKSaNCggfq5DHXu3Pmsx+V37dpFeHg4oO91WTpz5gxOTiVPTc7OzvbHjNXX5eNS+rVdu3a4urqWaJOamsqWLVvKp+/L/LbbSuLPx4w/+OADs23bNjN27Fjj5eVl9u/f7+jSKrWHH37Y+Pn5mRUrVpjU1FT7dObMGXubadOmGT8/P7NgwQKzefNmM3ToUD0iWAb++hSPMernsrRu3Trj4uJiXnrpJbN7924zZ84c4+npaT799FN7G/V32Rg+fLipU6eO/THjBQsWmICAAPPYY4/Z26ivL092drZJTEw0iYmJBjDTp083iYmJ9p/XuJR+feihh0zdunXN8uXLTUJCgrnpppv0mHF5mDVrlgkPDzdubm4mOjra/iisXD7gnNPs2bPtbWw2m5k0aZIJDg42VqvV3HDDDWbz5s2OK7qK+HtAUT+XrSVLlphWrVoZq9VqmjVrZv7zn/+UWK/+LhtZWVlmzJgxpl69esbd3d00aNDAPPXUUyY/P9/eRn19eX7++edz/vs8fPhwY8yl9Wtubq6JjY01/v7+xsPDw9x+++3m4MGD5VKvxRhjyv66jIiIiMjlq5b3oIiIiEjFpoAiIiIiFY4CioiIiFQ4CigiIiJS4SigiIiISIWjgCIiIiIVjgKKiIiIVDgKKCIiIlLhKKCIiIhIhaOAIiIiIhWOAoqIiIhUOAooIiIiUuH8/1aQZO/CBxvMAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig1 = plt.figure(label=\"first_figure\")\n", + "ax = plt.subplot(1,1,1)\n", + "ax.plot(x,x*2)\n", + "ax.text(0.02,0.95,config[\"pathPP\"],ha=\"left\",transform=ax.transAxes)\n", + "ax.text(0.02,0.90,config[\"dora_id\"],ha=\"left\",transform=ax.transAxes)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7943ebef-004c-4b1e-b71a-ad4e6fa6c3f3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA3s0lEQVR4nO3dZ3hUZcLG8Tt1UkgCISQhkEqHICAg0sGCi4iLXZSi2FhRQfZVZN21rCK4RV0bCiqiiLJ2xQrSi/RQhSQkhEAIoWbSy8x5PwTjoqiUmTlT/r/ryoecOTD39Rgyt+c853n8DMMwBAAA4CL+ZgcAAAC+hfIBAABcivIBAABcivIBAABcivIBAABcivIBAABcivIBAABcivIBAABcKtDsAD9nt9tVUFCgiIgI+fn5mR0HAACcBsMwVFJSooSEBPn7//a1DbcrHwUFBUpMTDQ7BgAAOAv5+flq3rz5b57jduUjIiJCUl34yMhIk9MAAIDTYbValZiYWP85/lvcrnz8eKslMjKS8gEAgIc5nSkTTDgFAAAuRfkAAAAuRfkAAAAuRfkAAAAuRfkAAAAuRfkAAAAuRfkAAAAuRfkAAAAuRfkAAAAudcblY9myZRo6dKgSEhLk5+enTz755KTXDcPQY489poSEBIWGhmrAgAHavn27o/ICAAAPd8blo6ysTJ06ddKLL754ytf/8Y9/6JlnntGLL76odevWKT4+XpdeeqlKSkrOOSwAAPB8Z7y3y+DBgzV48OBTvmYYhp577jk9/PDDuvrqqyVJs2fPVlxcnObOnau77rrr3NICAACP59A5H7m5uSosLNSgQYPqj1ksFvXv31+rVq065Z+pqqqS1Wo96QsAADjeoZIqvbQ4W1O+2GFqDofualtYWChJiouLO+l4XFyc8vLyTvlnpk6dqscff9yRMQAAwAmGYWht7lG9/X2evtleqBqboaAAP93Zr4WaRFhMyeTQ8vGjn2+naxjGr26xO3nyZE2cOLH+e6vVqsTERGfEAgDAZ5RU1ujjTfs15/s8ZR4srT/eJamhRvRIVkSIUyrAaXHoO8fHx0uquwLStGnT+uNFRUW/uBryI4vFIovFnOYFAIC32VFg1Zw1efpk036VV9skSaFBARrWJUE390hWerMokxM6uHykpqYqPj5eCxYsUJcuXSRJ1dXVWrp0qZ5++mlHvhUAADihqtamr7YW6u3v87Qh71j98RZNwjXywmRd3bW5IkOCTEx4sjMuH6WlpcrOzq7/Pjc3VxkZGYqOjlZSUpImTJigp556Sq1atVKrVq301FNPKSwsTDfddJNDgwMA4Ov2H6/QO9/nad66fB0pq5YkBfr76bIO8RpxYbIuTIv+1WkPZjrj8rF+/XoNHDiw/vsf52uMHj1ab775ph588EFVVFTo7rvv1rFjx9SjRw99++23ioiIcFxqAAB8lN1uaEX2Yb21Ok+Ldh6U3ag7Hh8Zopt6JOnG7omKjQwxN+Tv8DMMwzA7xP+yWq2KiopScXGxIiMjzY4DAIBbKC6v0fsb8vXOmr3KPVxWf7xXi8Ya1TNZl7SLU2CAebumnMnnt3lTXQEAwO/aXlCst1fn6ZOM/aqssUuSIiyBuqZrc424MEktYz3vzgLlAwAAN1Nda9fX2wv11qo9Wv8/E0jbxkdoZM9kDevcTOEWz/0I99zkAAB4mYPWSr2zZq/eXbtXh0qqJNVNIP1DerxG9UxR95RGbjmB9ExRPgAAMNGPK5C+tbpuBdLaEzNIm0RYdHOPJA2/IElxbj6B9ExRPgAAMEFFtU2fZOzX7FV7tLPwp53fu6c00qieKbqsQ7yCA82bQOpMlA8AAFwo/2i53lq9R/PW5ctaWStJCgny11VdmmnkhSlqn+D9T3pSPgAAcDLDqFubY/aqPfpuZ5F+XOQiMTpUoy5M0fXdEhUV5j4rkDob5QMAACcprarVRxv3afaqPdp96Ke1Ofq2itHonika2DZWAf6eP4H0TFE+AABwsD2HyzR79R69v36fSqvqbq2EBwfo2q7NNbJnilrGNjA5obkoHwAAOIBhGFqedVhvrtqjxbt+urWSFhOuUT2TdU3X5opwo83dzET5AADgHJSduLXy5s9urQxs00S39E5V35Yx8vfBWyu/hfIBAMBZ2HvkxFMr6/NVcuKplQaWQF3btblG9UxWWhPfvrXyWygfAACcJsMwtDrniGat3KOFPxysv7WSGhOu0dxaOW2UDwAAfkdljU2fZRTojZW5Jy0I1rdVjMb0TlX/1k24tXIGKB8AAPyKg9ZKvb06T3PX7tXRsmpJUmhQgK7p2ky39ErxyB1l3QHlAwCAn9mcf1xvrMzVF1sO1O+10qxhqEb1TNaN3ZN8akEwZ6B8AAAgqdZm1zfbD+qNlbna8D/b2HdPaaQxvVN1afs4BQZ4514rrkb5AAD4tOKKGs1bt1ezV+Vp//EKSVJQgJ+GnpegMX1Sld4syuSE3ofyAQDwSXsOl2nWyly9v2GfyqttkqTo8GCN6JGkET2TFRvhXdvYuxPKBwDAZ/z4qOwbK3JP2uCtTVyExvRJ0R87N1NIUIC5IX0A5QMA4PWqa+36fHOBXluRqx8OWOuPD2zTRLf1SVPvlo3l58ejsq5C+QAAeK1jZdWau3avZq/ao6KSKklSSJC/ru3aXLf2TlULViE1BeUDAOB1cg6V6o2Vufpgwz5V1tglSbERFo3ulaKbeySpYViwyQl9G+UDAOAVDMPQ9zlH9fqKHC38oaj+ePumkbq9b6quOC9BwYE8KusOKB8AAI9WY7Priy0HNHN5jrYX/DSf45J2sRrTJ1U905jP4W4oHwAAj2StrNF7a/dq1so9OlBcKYn5HJ6C8gEA8Cj7jpVr1so9mrcuX6VVdVvZxzSwaHTPZN18YbKiw5n
P4e4oHwAAj7Bl33HNXJ6rL7cekO3EfiutYhvojr5purJzAutzeBDKBwDAbdnthhbvKtKMZTlak3u0/njvlo11e980DWjdhPkcHojyAQBwO1W1Nn26qUAzlucou6hUkhTo76crOyXotr6p6pDAfiuejPIBAHAbxeU1mrMmT2+u2qNDJxYFa2AJ1E09knRr7xQ1jQo1OSEcgfIBADDdvmPlemPFHr23bm/9Jm/xkSEa0ydFN16QpMiQIJMTwpEoHwAA02wvKNaMZTmav+WnSaRt4yN0R980De3EomDeivIBAHApwzC0avcRvbJ0t5ZnHa4/3rtlY93RN039mUTq9SgfAACXqLXZ9dW2Qr26bLe27a9bidTfTxpyXoLu6pem9GZMIvUVlA8AgFNVVNv0wYZ8zVyeq71HyyXVrUR6Q7dE3d43TYnRYSYnhKtRPgAATnGsrFpvrc7T7NV7dLSsWpLUKCxIo3ulaFTPFFYi9WGUDwCAQxUcr9Bry3P17tq9qqipe3KleaNQ3dkvTdd1TVRoMCuR+jrKBwDAIbKLSvTK0hx9smm/ak88udK+aaTu6p+mIR2bKjCAJ1dQh/IBADgnG/ce0/Qlu7Vgx8H6YxemRetPA1qqX6sYnlzBL1A+AABnzDAMLc08pOlLdp+058plHeI0tn8LdUlqZGI6uDvKBwDgtNnshr7cekDTl+zWjgN1j8sGBfhpWOdmuqt/mlrGRpicEJ6A8gEA+F1VtTZ9vHG/Xl2Wo9zDZZKksOAA3XRBkm7rm8qeKzgjlA8AwK8qq6rVu2v36rXluSq0VkqSokKDdGvvFI3umaJGPC6Ls0D5AAD8wvHyas1eladZq3J1vLxGkhQXadEdfdM0/IIkhVv4+MDZ46cHAFCvyFqp11bk6p3v81R2YnfZ5MZhGtu/ha4+v5ksgazRgXNH+QAAaN+xcr26NEfz1uerutYuqW532bsHttTl6fGs0QGHonwAgA/bfahU05fsPmlhsPOTGuqei1pqYJtY1uiAU1A+AMAH7Siw6qUl2fpy6wEZdZ1DvVs21riBLdUzrTGlA05F+QAAH7Jp7zG9uChb3+0sqj92SbtY3T2wpc5nYTC4COUDAHzAmpwjemFRtlZkH5Yk+flJQzo21biBLdWuaaTJ6eBrKB8A4KUMw9DK7CN6flGW1p5YAj3Q30/DujTT3QNaKK1JA5MTwldRPgDAyxiGocW7ivTComxt2ntckhQc4K9ruzXXn/q3UGJ0mLkB4fMoHwDgJex2Q9/uOKgXF2dp2/66fVcsgf4afkGS7uqfxhLocBuUDwDwcHa7oS+3HdAL32Vr18ESSXX7roy4MFm3901VbESIyQmBk1E+AMBD2eyG5m8p0IuLspVVVCpJamAJ1OheybqtT5qi2XcFboryAQAeptZm1/wtB/TCoiztPlS3w2xkSKDG9EnVrb1SFRUWZHJC4LdRPgDAQ9Ta7Po0o0AvLs6u39Y+KjRIt/VJ1S29UxQZQumAZ6B8AICbq7XZ9fGm/Xppcbb2HCmXJDUMC9IdfdM0qmeyIigd8DCUDwBwUz+WjhcXZyvvROmIDg/WHX3TNLJnshqwrT08FD+5AOBmam12fZJRoBcWZdWXjsbhwbqzX5pGXJiscEoHPBw/wQDgJn6c0/HCoqz62yuNw4N1V/+60hEWzK9seAd+kgHAZLU2uz7bXKAXFv00kTT6xJWOUT0pHfA+/EQDgElsdkOfby7Q899lKedE6WgUFqQ7+7XQqJ7cXoH3cvhPdm1trR577DG98847KiwsVNOmTXXLLbfor3/9q/z9/R39dgDgcX5ckfS5hVnKPrE4WKOwIN3RL02je6ZQOuD1HP4T/vTTT+uVV17R7Nmz1aFDB61fv1633nqroqKiNH78eEe/HQB4DMMw9M32g3puYaZ2FtYtgx4VGqQ7+6VpdK8Unl6Bz3D4T/rq1av1xz/+UUOGDJEkpaSk6N1339X69esd/VYA4BEMw9CinUV6ZkGmthfUbfgWYQnU7X3TdGsfFgeD73F4+ejTp49eeeUVZWZmqnXr1tq8ebNWrFih55577pTnV1VVqaqqqv57q9Xq6EgAYArDMLQs67CeWZCpzfnHJUnhwQEa0ydVt/dJYxl0+CyHl49JkyapuLhYbdu2VUBAgGw2m6ZMmaLhw4ef8vypU6fq8ccfd3QMADDV9zlH9O9vd2ndnmOSpNCgAI3ulaI7+7HhG+Dw8jFv3jzNmTNHc+fOVYcOHZSRkaEJEyYoISFBo0eP/sX5kydP1sSJE+u/t1qtSkxMdHQsAHCJTXuP6d/fZmpF9mFJkiXQXyMvTNZd/VuoSYTF5HSAe/AzDMNw5F+YmJiohx56SOPGjas/9uSTT2rOnDnauXPn7/55q9WqqKgoFRcXKzIy0pHRAMBpdhRY9cyCXVr4Q5EkKSjATzd2T9I9F7VUXGSIyekA5zuTz2+HX/koLy//xSO1AQEBstvtjn4rADBddlGpnl2YqS+2HJAk+ftJ15zfXPdd3EqJ0WEmpwPck8PLx9ChQzVlyhQlJSWpQ4cO2rRpk5555hmNGTPG0W8FAKbJP1qu5xZm6eNN+2Q/cf14aKcETbiklVo0aWBuOMDNOfy2S0lJif72t7/p448/VlFRkRISEjR8+HA98sgjCg7+/UlW3HYB4M6KrJV6cXG23l27VzW2ul+fl7aP08RLW6tdU35nwXedyee3w8vHuaJ8AHBHxeU1emXZbs1amavKmrrbyH1bxejPg9qoc2JDc8MBbsDUOR8A4E3Kq2s1a+Uevbp0t6yVtZKkzokN9eAf2qhXixiT0wGeifIBAKdQXWvXe+v26vnvsnW4tG4hxDZxEfq/y9roknax8vPzMzkh4LkoHwDwP2x2Q59m7NezCzOVf7RCkpQYHaqJl7bWlZ2aKcCf0gGcK8oHAOin/Vf++c2u+k3fmkRYdN9FLXVD9yQFB7IrN+AolA8APm9D3lFN+2pn/VLokSGBGjughW7plaKwYH5NAo7GvyoAPivzYIn+8fUuLfzhoKS6pdBv6Z2iu/u3ZNM3wIkoHwB8zv7jFXp2QaY+2li3QJi/n3R9t0SNv6SVmkaFmh0P8HqUDwA+41hZtV5ekq3Zq/NUXVu3VscfOsTr/y5ro5axrEoKuArlA4DXq6yx6c1Ve/TS4myVnFir48K0aE36Q1t1SWpkcjrA91A+AHgtm93Qx5v265lvd6mguFKS1DY+QpMGt9WA1k1YqwMwCeUDgNcxDENLMw9p2lc76x+bTYgK0Z8HtdGwLqzVAZiN8gHAq2zbX6ypX/2gldlHJEkRIYG6Z2BLje6VopCgAJPTAZAoHwC8RP7Rcv3r2136NKNAkhQc4K/RvZI1bmBLNQz7/R21AbgO5QOARyuuqNHLi7M1a+UeVdvqnmC5qkszTby0tRKjw0xOB+BUKB8APFJ1rV3vrMnTf77L0vHyGklS75aNNXlwO6U3izI5HYDfQvkA4FEMw9A32ws17aud2nOkXJLUKraB/jKkHU+wAB6C8gHAY2zae0xTvvhB6/Pq9mCJaWDRxEtb6/
puzRUYwMZvgKegfABwe/lHy/X01zs1f8sBSVJIkL/u7JumO/u3UAMLv8YAT8O/WgBuq6SyRi8uztasFXWTSf38pGvPb64/D2qj+KgQs+MBOEuUDwBup9Zm17z1+Xrm20wdKauWVDeZ9OHL26t9QqTJ6QCcK8oHALeyPOuQnpz/g3YdrFuZNK1JuB6+vJ0uahvLZFLAS1A+ALiF7KJSTflihxbvOiRJahgWpAkXt9LNFyYriMmkgFehfAAw1bGyaj23MFNz1uyVzW4o0N9Po3qm6L6LWZkU8FaUDwCmqLHZNef7PD23MEvFFXWLhF3SLk5/ubyt0po0MDkdAGeifABwuaWZh/TE/B3KLiqVVLfN/SNXtFevljEmJwPgCpQPAC6Tc6hUU774Qd/tLJIkRYcH6/8GtdEN3RPZ5h7wIZQPAE5nrazRC99l6c1Ve1Rjq5vXMbpXiu67uJWiQoPMjgfAxSgfAJzGZjf03/X5+tc3u+rX6xjYpon+ekV7tWBeB+CzKB8AnGLdnqN69NPt2nHAKqluvY6/XdFeA9vEmpwMgNkoHwAcqrC4UtO++kGfZBRIkiJDAjXhktYa2ZP1OgDUoXwAcIiqWpveWLFHLyzKUnm1TX5+0o3dk/R/g1qrcQOL2fEAuBHKB4Bztnhnkf4+f4dyD5dJks5PaqjHr0xXx+ZRJicD4I4oHwDO2p7DZXpi/o76R2djGlg0eXBbXdWlmfx5dBbAr6B8ADhj5dW1enFRtl5bnqtqm12B/n4a0ydV917UUhEhPDoL4LdRPgCcNsMw9PW2Qj0xf4cKiislSX1bxejRoR3UMpZHZwGcHsoHgNOSc6hUj362XcuzDkuSmjUM1SND22tQ+zi2ugdwRigfAH5TeXWtXlqcrRnLclRjMxQc4K+7+qfp7gEtFRocYHY8AB6I8gHglAzD0DfbD+qJ+Tu0/3iFJKl/6yZ67MoOSo0JNzkdAE9G+QDwC7mHy/TYZ9u1NPOQJG6xAHAsygeAepU1Nr28OFuvLM1Rtc2u4AB/3dkvTeMGcosFgONQPgBIkpbsKtKjn21X3pFySVK/1k30OLdYADgB5QPwcYXFlXpi/g59sfWAJCku0qJHh3bQ4PR4brEAcArKB+Cjam12vbU6T88syFRpVa38/aRbe6fq/ktbq4GFXw0AnIffMIAPysg/roc/3qrtBXXb3XdObKgpV6WrQwJ7sQBwPsoH4EOKK2r0z2926p01e2UYddvdTxrcVsO7J7EXCwCXoXwAPsAwDH2+5YD+/vkOHS6tkiRd3aWZ/jKknWLY7h6Ai1E+AC+Xf7Rcf/t0m5bsqluzI61JuJ4clq5eLWJMTgbAV1E+AC9Va7PrjZW5enZBlipqbAoO8Ne4gS01dkCaLIGs2QHAPJQPwAttzj+uyR9t1Y4DdRNKe6RG66mrO6pFE3aeBWA+ygfgRUqravWvb3bprdV7ZDekqNAgPXx5O13XrTlrdgBwG5QPwEss2HFQj3y6TQeKKyVJwzon6K9XtGdCKQC3Q/kAPNyhkio99vl2fbGlboXSpOgwPTksXf1aNzE5GQCcGuUD8FCGYeiDDfv05Bc/qLiiRgH+frqjb5rGX9yKTeAAuDXKB+CB8o+W6y8fb9XyrMOSpA4JkXr6mvOU3owVSgG4P8oH4EFsdkOzVubq399mqqLGJkugvyZc0lp39E1VYIC/2fEA4LRQPgAPsauwRJM+3KKM/OOS6h6fnXbNeWx5D8DjUD4AN1dda9eLi7M1fUm2amyGIiyBmnx5O93YPZH9WAB4JMoH4Ma27ivWAx9s1s7CEknSpe3j9MQf0xUfFWJyMgA4e5QPwA1V1dr0n4VZenVZjmx2Q43Dg/X4HztoSMemLBYGwONRPgA3k5F/XA+8v1lZRaWSpKGdEvTY0PZqzGJhALwE5QNwE5U1Nj27IFMzl+fIbkgxDSx6cli6/pAeb3Y0AHAoygfgBjbkHdUDH2xRzqEySdJVXZrpkSvaq1F4sMnJAMDxKB+AiSprbPrXN7v0+spcGYYUG2HRlKs66tL2cWZHAwCnoXwAJtm095j+/P7m+qsd15zfXI9c0V5RYUEmJwMA53LKkoj79+/XiBEj1LhxY4WFhalz587asGGDM94K8DhVtTb985udumb6KuUcKlNshEVv3NJN/76+E8UDgE9w+JWPY8eOqXfv3ho4cKC++uorxcbGavfu3WrYsKGj3wrwODsKrJr434z6dTuGdU7QY1d2UMMw5nYA8B0OLx9PP/20EhMTNWvWrPpjKSkpjn4bwKPU2ux6Zelu/ee7LNXYDEWHB2vKsHQN7tjU7GgA4HIOv+3y2WefqVu3brruuusUGxurLl26aObMmb96flVVlaxW60lfgDfJLirVNdNX6V/fZqrGZmhQ+zh9e38/igcAn+Xw8pGTk6Pp06erVatW+uabbzR27Fjdd999euutt055/tSpUxUVFVX/lZiY6OhIgCnsdkOvr8jVkOeXa/O+YkWEBOrZGzrp1ZFdFcOCYQB8mJ9hGIYj/8Lg4GB169ZNq1atqj923333ad26dVq9evUvzq+qqlJVVVX991arVYmJiSouLlZkZKQjowEus/94hf783wx9n3NUktSvdRM9fU1HNY0KNTkZADiH1WpVVFTUaX1+O3zOR9OmTdW+ffuTjrVr104ffvjhKc+3WCyyWPi/QHgHwzD0aUaB/vbpNpVU1iosOEAPD2mnmy5IYk8WADjB4eWjd+/e2rVr10nHMjMzlZyc7Oi3AtxKcXmNHv5kq+ZvOSBJ6pLUUM9e31kpMeEmJwMA9+Lw8nH//ferV69eeuqpp3T99ddr7dq1mjFjhmbMmOHotwLcxsrsw/rzfzer0FqpAH8/jb+4le4e0EKBAU5ZSgcAPJrD53xI0vz58zV58mRlZWUpNTVVEydO1B133HFaf/ZM7hkBZqussekfX+/SGytzJUlpMeF69obO6pTY0NxgAOBiZ/L57ZTycS4oH/AUOwqsmjBvkzIPlkqSRlyYpL9c3k5hwexaAMD3mDrhFPB2druh11bk6J/f7FKNzVBMA4v+ee15Gtg21uxoAOARKB/AGSiyVmrifzdrRfZhSdKl7eM07eqOasy6HQBw2igfwGn67oeDeuCDLTpaVq3QoAA9MrS9buyeyCO0AHCGKB/A76issWnqlz9o9uo8SVL7ppF6fngXtYxtYHIyAPBMlA/gN2QeLNG9czdp18G6XWhv65OqB//QRpbAAJOTAYDnonwAp2AYhuas2asn5+9QVa1dMQ2C9c/rOmlgGyaVAsC5onwAP3O0rFqTPtyiBTsOSqrbl+Xf13VSkwgmlQKAI1A+gP+xJueIxr+XoUJrpYIC/DTpD201pneq/P2ZVAoAjkL5ACTZ7IZeXpytZxdmym7UrVT6/PAuSm8WZXY0APA6lA/4vEMlVbp/Xkb92h1Xd2mmJ4alK9zCPw8AcAZ+u8Knrco+rPHzMnSopEohQf76+x/TdV3X5qzdAQBORPmAT7LZDf3nuyy9sChLhiG1jmugl246X63iIsyOBgBej/IBn3PQW
qnx723S9zlHJUk3dEvUY1d2UGgwa3cAgCtQPuBTlmYe0sR5GTpSVq2w4AA9dVVHDevSzOxYAOBTKB/wCTa7of8szNQLi7NlGFK7ppF66aYuSmvCEukA4GqUD3i9o2XVGv/eJi3Pqnua5aYeSXrkivYKCeI2CwCYgfIBr7Zx7zGNe2ejDhRXKiTIX1Ov7qirujQ3OxYA+DTKB7ySYRh6a3Wenvxih2pshtJiwjV9RFe1iedpFgAwG+UDXqesqlYPfbRVn28ukCQNTo/XP649TxEhQSYnAwBIlA94meyiEo2ds1HZRaUK8PfT5MFtdVufVBYNAwA3QvmA1/h8c4EmfbhF5dU2xUZY9NLN56t7SrTZsQAAP0P5gMertdk17audem1FriSpZ1pjPT+8i5pEWExOBgA4FcoHPNrRsmrd++5Grcw+Ikka27+F/m9QawUG+JucDADwaygf8FjbC4p151sbtP94hcKCA/Sv6zrp8o5NzY4FAPgdlA94pE8z9mvSh1tUWWNXcuMwzRjZjcdoAcBDUD7gUX4+v2NAmyb6zw1dFBXGY7QA4CkoH/AYP5/fMW5gC028tI0C/HmMFgA8CeUDHuHn8zv+fV0nDWZ+BwB4JMoH3N5nmwv04AebVVljV0rjMM0Y1U2t45jfAQCeivIBt2W3G3p2YaZeWJQtifkdAOAtKB9wS+XVtfrzfzfrq22FkqS7+qXpwT+0ZX4HAHgBygfczoHiCt0+e722F1gVFOCnp67qqOu6JZodCwDgIJQPuJVNe4/pzrc36FBJlRqHB+vVkV3Vjf1ZAMCrUD7gNj7N2K8HPtii6lq72sZHaOaobkqMDjM7FgDAwSgfMN3PJ5Ze0i5Wz93YRQ0s/HgCgDfitztMVV5dq4nzNuvr7ScmlvZP04OXMbEUALwZ5QOmKbJWaszsddq236rgAH89dXVHXdu1udmxAABORvmAKTIPlujWWeu0/3iFosODNYOJpQDgMygfcLlV2Yd115wNKqmsVVpMuGbd2l3JjcPNjgUAcBHKB1zqww379NBHW1RjM9Q9pZFmjOymRuHBZscCALgQ5QMuYRiG/vNdlp5bmCVJuuK8pvrXdZ0UEhRgcjIAgKtRPuB01bV2/eXjrfpgwz5J0p8GtNADg9rInydaAMAnUT7gVMUVNbr7nQ1amX1EAf5+euKP6bqpR5LZsQAAJqJ8wGn2H6/QrbPWKvNgqcKCA/TSzedrYJtYs2MBAExG+YBT7Cy0atTra1VUUqW4SIteH91d6c2izI4FAHADlA843Lo9R3Xbm+tkraxV67gGevPWC5TQMNTsWAAAN0H5gEMt3HFQ4+ZuVFWtXd2SG+n10d0VFRZkdiwAgBuhfMBh/rs+X5M/2iqb3dDFbWP14k3nKzSYR2kBACejfOCcGYahV5flaNpXOyVJ13ZtrmlXd1RggL/JyQAA7ojygXNitxua+tUPmrk8V1LdrrQP/aGt/PxYwwMAcGqUD5y1Gptdkz7Yoo827Zck/eXytrqzXwuTUwEA3B3lA2elotqmu9/ZoMW7DinA30//uOY8XdO1udmxAAAegPKBM1ZcUaMxb67ThrxjCgny18s3n6+L2saZHQsA4CEoHzgjR8uqNfL1NdpeYFVkSKBm3dpdXZOjzY4FAPAglA+ctiJrpUa8vkaZB0sV0yBYb9/WQ+2aRpodCwDgYSgfOC0Fxyt082trlHu4THGRFr1z+4VqGdvA7FgAAA9E+cDv2nukXDe99r32HatQ80ahmnv7hUpqHGZ2LACAh6J84DftPlSqm2euUaG1UimNwzT3jgvZpwUAcE4oH/hVOwutGvHaGh0urVar2AZ65/Yeio0MMTsWAMDDUT5wStv2F2vE62t0vLxG7ZtG6u3bLlDjBhazYwEAvADlA7+wIe+Ybpm1ViWVteqc2FCzb72AnWkBAA5D+cBJ1uYe1S2z1qq82qYLUqP1xi3d1cDCjwkAwHH4VEG9DXk/FY++rWI0Y2Q3hQYHmB0LAOBlnL7n+dSpU+Xn56cJEyY4+61wDjbtPabRb6xTebVNfVrGaOYoigcAwDmcWj7WrVunGTNm6LzzznPm2+Acbdl3XKPeWKvSqlr1TGusmaO6KSSI4gEAcA6nlY/S0lLdfPPNmjlzpho1auSst8E52ra/WCNfr5tcekFKtF6/hSseAADnclr5GDdunIYMGaJLLrnkN8+rqqqS1Wo96QuusbPQqpGvr1FxRY3OT2qoN27trrBgpgEBAJzLKZ807733njZu3Kh169b97rlTp07V448/7owY+A1ZB0t088w1OlZeo06JDfXmmAt4qgUA4BIOv/KRn5+v8ePHa86cOQoJ+f3VMCdPnqzi4uL6r/z8fEdHws9kF5Vq+Mw1OlJWrfRmkXprzAWKDGEdDwCAa/gZhmE48i/85JNPdNVVVykg4Kd5AzabTX5+fvL391dVVdVJr/2c1WpVVFSUiouLFRnJdu2Olnu4TDe8ulpFJVVq1zRS797RQw3Dgs2OBQDwcGfy+e3w6+wXX3yxtm7detKxW2+9VW3bttWkSZN+s3jAufYeKddNM79XUUmV2sRF6J3bKR4AANdzePmIiIhQenr6ScfCw8PVuHHjXxyH6xSVVGrE62t0oLhSLWMb6J07eig6nOIBAHA9py8yBvMVV9Ro9BvrtPdouZKiwzT39h6KYZM4AIBJXPJ4w5IlS1zxNjiFyhqb7pi9Xj8csCqmgUVv33aBYiN/fyIwAADOwpUPL1Zrs+ueuZu0ds9RRVgCNXtMdyU3Djc7FgDAx1E+vJRhGJr80VYt/OGgggP99drobuqQEGV2LAAAKB/eatrXO/X+hn3y95NeHN5FPdIamx0JAABJlA+vNGPZbr26NEeSNO2a8zSoQ7zJiQAA+Anlw8u8vz5fT325U5I0eXBbXd8t0eREAACcjPLhRRbsOKiHPqpb4O3Ofmm6q38LkxMBAPBLlA8vsTb3qO6Zu1E2u6FruzbX5MFtzY4EAMApUT68QM6hUt3x1npV1dp1SbtYTbu6o/z8/MyOBQDAKVE+PNyxsmqNeXOdiitq1CWpoV686XwFBvCfFQDgvviU8mDVtXbdNWeD9hwpV7OGoZoxsptCgti4DwDg3igfHurHRcTW5h5VA0ug3rilu5pEsF8LAMD9UT481MtLduvDjScWEbupi9rER5gdCQCA00L58EBfbj2gf36zS5L02JUdNKBNrMmJAAA4fZQPD7M5/7jun5chSbqlV4pG9UwxNQ8AAGeK8uFB9h+v0O0nHqkd2KaJ/nZFe7MjAQBwxigfHqK0qla3vblOh0qq1DY+Qi/cdL4C/FnLAwDgeSgfHsBmN3Tfu5u0s7BEMQ0sev2W7mpgCTQ7FgAAZ4Xy4QGmfPGDFu0skiXQX6+N7qZmDUPNjgQAwFmjfLi5jzft0xsrcyVJz1zfWZ0TG5obCACAc0T5cGM7C62afGKX2nsvaqkh5zU1OREAAOeO8uGmSipr9Kc5G1VZY1ffVjGacElrsyMBAOAQlA83ZBiGHnh/i3IPlykhKkT/ubELT7YAALwG5cMN
zVyeo6+3FyoowE8vj+iq6PBgsyMBAOAwlA83833OET39dd3S6Y8M7cAEUwCA16F8uJEia6XumbtJNruhq7o004geSWZHAgDA4SgfbqLGZte4uRt1uLRKbeIiNOWqdPn5Mc8DAOB9KB9u4umvdmrdnmOKsATqlZFdFRbMCqYAAO9E+XADX249oNdW1C0k9s/rOik1JtzkRAAAOA/lw2S7D5Xqgfc3S5Lu6pemP6THm5wIAADnonyYqLy6VmPf3qCyapt6pEbrgcvamB0JAACno3yYaOqXO5VVVKrYCIteuKmLAgP4zwEA8H582plkaeYhvf19niTp39d3UmxEiMmJAABwDcqHCY6XV9fP8xjdM1l9WzUxOREAAK5D+TDBXz/ZpqKSKqU1CddDg9uZHQcAAJeifLjYpxn7NX/LAQX4++nZ6zsrNDjA7EgAALgU5cOFDhRX6G+fbJMk3XtRS3Vi3xYAgA+ifLiI3W7owQ+2yFpZq07NozRuYEuzIwEAYArKh4u8/X2elmcdVkiQv565obOCeKwWAOCj+AR0gd2HSjX1qx8kSZMHt1OLJg1MTgQAgHkoH05WY7Nr4rwMVdbY1bdVjEZemGx2JAAATEX5cLKXFmdr875iRYYE6p/XdpK/v5/ZkQAAMBXlw4k25x/XC4uyJUlPDEtXfBSrmAIAQPlwkopqm+7/b4ZsdkNDOyXoj52bmR0JAAC3QPlwkucXZSnnUJniIi164o8dzI4DAIDboHw4we5DpXpteY4k6clhHdUwLNjkRAAAuA/Kh4MZhqHHPtuuGpuhi9rG6tL2cWZHAgDArVA+HOyb7YVannVYwYH+enRoe7PjAADgdigfDlReXau/f75DkjS2X5qSG4ebnAgAAPdD+XCglxZnq6C4Us0ahupPA9i7BQCAU6F8OEju4TLNXJYrSXpkaHuFBgeYnAgAAPdE+XCAHyeZVtvs6t+6iQYxyRQAgF9F+XCAb3cc1NLMQwoO8NdjV3aQnx9LqAMA8GsoH+eootpWP8n0jn6pSo1hkikAAL+F8nGOXl6Srf3HK5QQFaJxA5lkCgDA76F8nIM9h8v06tK6lUwfGdpeYcGBJicCAMD9UT7OkmEYevzzukmmfVvF6LIO8WZHAgDAI1A+ztLCH4q0eNchBQX46XEmmQIAcNooH2ehssamxz/fLkm6vW+a0po0MDkRAACeg/JxFmYsy9G+YxVqGhWiey9ikikAAGeC8nGGSipr9NryukmmDw1uyyRTAADOEOXjDL21Ok/Wylq1aBKuK85LMDsOAAAeh/JxBsqra/X6irr9W+65qKUC/JlkCgDAmaJ8nIG5a/bqaFm1kqLDNJSrHgAAnBXKx2mqrLHp1WV1cz3uHtBCgQEMHQAAZ8Phn6BTp05V9+7dFRERodjYWA0bNky7du1y9Nu43H/X5+tQSZUSokJ09fnNzY4DAIDHcnj5WLp0qcaNG6fvv/9eCxYsUG1trQYNGqSysjJHv5XLVNfa9cqS3ZKksQNaKDiQqx4AAJwthz8n+vXXX5/0/axZsxQbG6sNGzaoX79+jn47l/ho4z4VFFeqSYRF13dLNDsOAAAezemLVBQXF0uSoqOjT/l6VVWVqqqq6r+3Wq3OjnRGam12vXziqsdd/dIUEhRgciIAADybU+8fGIahiRMnqk+fPkpPTz/lOVOnTlVUVFT9V2Kie11Z+HxLgfYeLVd0eLBu6pFkdhwAADyeU8vHPffcoy1btujdd9/91XMmT56s4uLi+q/8/HxnRjojNruhFxdlS5Ju65PKaqYAADiA0z5N7733Xn322WdatmyZmjf/9adDLBaLLBaLs2Kck6+3FWr3oTJFhgRqVM9ks+MAAOAVHF4+DMPQvffeq48//lhLlixRamqqo9/CJex2Qy8sypIk3dI7VREhQSYnAgDAOzi8fIwbN05z587Vp59+qoiICBUWFkqSoqKiFBoa6ui3c5rvdhZpZ2GJwoMDNKZ3itlxAADwGg6f8zF9+nQVFxdrwIABatq0af3XvHnzHP1WTmMYhl48cdVjZM8UNQwLNjkRAADewym3XTzdsqzD2ryvWCFB/rq9r2feNgIAwF2xVOfPGIahF76ru+px0wXJimngnpNhAQDwVJSPn/k+56jW5x1TcIC/7uyXZnYcAAC8DuXjZ6YvrVvN9PruzRUfFWJyGgAAvA/l438cLq3SiqxDkqTb+3DVAwAAZ6B8/I+vtxXKbkjnNY9SSky42XEAAPBKlI//8cWWA5Kkyzs2NTkJAADei/JxwqGSKq3JPSJJGkL5AADAaSgfJ3y9/adbLonRYWbHAQDAa1E+TvjyxC0XrnoAAOBclA+dfMuF+R4AADgX5UM/3XLpxC0XAACcjvIh6YstBZK46gEAgCv4fPkoKqnU2tyjkigfAAC4gs+Xj2+2ccsFAABX8vny8cXWE0+5nMdVDwAAXMGny0dRSaXWcMsFAACX8uny8c22QhmG1CmxoZo34pYLAACu4NPlY/6JhcWu4KoHAAAu47Plo6ikUmv31N1yGdwx3uQ0AAD4Dp8tH1+fuOXSmVsuAAC4lM+Wjy/YywUAAFP4ZPkosnLLBQAAs/hk+fh6O7dcAAAwi0+Wj/qnXFhYDAAAl/O58lFkrdS6+lsulA8AAFzN58rHVyeecumS1FDNGoaaHQcAAJ/jc+Wjfi8XrnoAAGAKnyof3HIBAMB8PlU+uOUCAID5fKp8sLAYAADm85nycdBaqXV5dbdcLqd8AABgmkCzA7hKWHCAnhyWrqyDpUrglgsAAKbxmfIRERKkm3skmx0DAACf5zO3XQAAgHugfAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJeifAAAAJdyu11tDcOQJFmtVpOTAACA0/Xj5/aPn+O/xe3KR0lJiSQpMTHR5CQAAOBMlZSUKCoq6jfP8TNOp6K4kN1uV0FBgSIiIuTn53fWf4/ValViYqLy8/MVGRnpwIQ4FcbbdRhr12GsXYexdh1njbVhGCopKVFCQoL8/X97VofbXfnw9/dX8+bNHfb3RUZG8oPsQoy36zDWrsNYuw5j7TrOGOvfu+LxIyacAgAAl6J8AAAAl/La8mGxWPToo4/KYrGYHcUnMN6uw1i7DmPtOoy167jDWLvdhFMAAODdvPbKBwAAcE+UDwAA4FKUDwAA4FKUDwAA4FJeWz5efvllpaamKiQkRF27dtXy5cvNjuTxpk6dqu7duysiIkKxsbEaNmyYdu3addI5hmHoscceU0JCgkJDQzVgwABt377dpMTeYerUqfLz89OECRPqjzHOjrV//36NGDFCjRs3VlhYmDp37qwNGzbUv854O0Ztba3++te/KjU1VaGhoUpLS9Pf//532e32+nMY67OzbNkyDR06VAkJCfLz89Mnn3xy0uunM65VVVW69957FRMTo/DwcF155ZXat2+fcwIbXui9994zgoKCjJkzZxo7duwwxo8fb4SHhxt5eXlmR/Nol112mTFr1ixj27Z
tRkZGhjFkyBAjKSnJKC0trT9n2rRpRkREhPHhhx8aW7duNW644QajadOmhtVqNTG551q7dq2RkpJinHfeecb48ePrjzPOjnP06FEjOTnZuOWWW4w1a9YYubm5xsKFC43s7Oz6cxhvx3jyySeNxo0bG/Pnzzdyc3ON999/32jQoIHx3HPP1Z/DWJ+dL7/80nj44YeNDz/80JBkfPzxxye9fjrjOnbsWKNZs2bGggULjI0bNxoDBw40OnXqZNTW1jo8r1eWjwsuuMAYO3bsScfatm1rPPTQQyYl8k5FRUWGJGPp0qWGYRiG3W434uPjjWnTptWfU1lZaURFRRmvvPKKWTE9VklJidGqVStjwYIFRv/+/evLB+PsWJMmTTL69Onzq68z3o4zZMgQY8yYMScdu/rqq40RI0YYhsFYO8rPy8fpjOvx48eNoKAg47333qs/Z//+/Ya/v7/x9ddfOzyj1912qa6u1oYNGzRo0KCTjg8aNEirVq0yKZV3Ki4uliRFR0dLknJzc1VYWHjS2FssFvXv35+xPwvjxo3TkCFDdMkll5x0nHF2rM8++0zdunXTddddp9jYWHXp0kUzZ86sf53xdpw+ffrou+++U2ZmpiRp8+bNWrFihS6//HJJjLWznM64btiwQTU1NSedk5CQoPT0dKeMvdttLHeuDh8+LJvNpri4uJOOx8XFqbCw0KRU3scwDE2cOFF9+vRRenq6JNWP76nGPi8vz+UZPdl7772njRs3at26db94jXF2rJycHE2fPl0TJ07UX/7yF61du1b33XefLBaLRo0axXg70KRJk1RcXKy2bdsqICBANptNU6ZM0fDhwyXxs+0spzOuhYWFCg4OVqNGjX5xjjM+O72ufPzIz8/vpO8Nw/jFMZy9e+65R1u2bNGKFSt+8Rpjf27y8/M1fvx4ffvttwoJCfnV8xhnx7Db7erWrZueeuopSVKXLl20fft2TZ8+XaNGjao/j/E+d/PmzdOcOXM0d+5cdejQQRkZGZowYYISEhI0evTo+vMYa+c4m3F11th73W2XmJgYBQQE/KKpFRUV/aL14ezce++9+uyzz7R48WI1b968/nh8fLwkMfbnaMOGDSoqKlLXrl0VGBiowMBALV26VM8//7wCAwPrx5JxdoymTZuqffv2Jx1r166d9u7dK4mfa0d64IEH9NBDD+nGG29Ux44dNXLkSN1///2aOnWqJMbaWU5nXOPj41VdXa1jx4796jmO5HXlIzg4WF27dtWCBQtOOr5gwQL16tXLpFTewTAM3XPPPfroo4+0aNEipaamnvR6amqq4uPjTxr76upqLV26lLE/AxdffLG2bt2qjIyM+q9u3brp5ptvVkZGhtLS0hhnB+rdu/cvHhnPzMxUcnKyJH6uHam8vFz+/id/7AQEBNQ/astYO8fpjGvXrl0VFBR00jkHDhzQtm3bnDP2Dp/C6gZ+fNT29ddfN3bs2GFMmDDBCA8PN/bs2WN2NI/2pz/9yYiKijKWLFliHDhwoP6rvLy8/pxp06YZUVFRxkcffWRs3brVGD58OI/JOcD/Pu1iGIyzI61du9YIDAw0pkyZYmRlZRnvvPOOERYWZsyZM6f+HMbbMUaPHm00a9as/lHbjz76yIiJiTEefPDB+nMY67NTUlJibNq0ydi0aZMhyXjmmWeMTZs21S8xcTrjOnbsWKN58+bGwoULjY0bNxoXXXQRj9qeqZdeeslITk42goODjfPPP7/+cVCcPUmn/Jo1a1b9OXa73Xj00UeN+Ph4w2KxGP369TO2bt1qXmgv8fPywTg71ueff26kp6cbFovFaNu2rTFjxoyTXme8HcNqtRrjx483kpKSjJCQECMtLc14+OGHjaqqqvpzGOuzs3jx4lP+fh49erRhGKc3rhUVFcY999xjREdHG6GhocYVV1xh7N271yl5/QzDMBx/PQUAAODUvG7OBwAAcG+UDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FKUDwAA4FL/D5C7NWTjXUMyAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig2 = plt.figure(label=\"second_figure\")\n", + "ax = plt.subplot(1,1,1)\n", + "ax.plot(x,np.sqrt(x))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diagnostics/forcing_feedback/forcing_feedback.py b/diagnostics/forcing_feedback/forcing_feedback.py index 23b968bf0..d8c8baf9e 100644 --- a/diagnostics/forcing_feedback/forcing_feedback.py +++ b/diagnostics/forcing_feedback/forcing_feedback.py @@ -59,7 +59,7 @@ try: os.system("python " + os.environ["POD_HOME"] + "/" + "forcing_feedback_kernelcalcs.py") - print('Working Directory is ' + os.environ['WK_DIR']) + print('Working Directory is ' + os.environ['WORK_DIR']) print('Forcing Feedback POD is executing') except RuntimeError as e1: print('WARNING', e1.errno, e1.strerror) diff --git a/diagnostics/forcing_feedback/forcing_feedback_plot.py b/diagnostics/forcing_feedback/forcing_feedback_plot.py index 865c46def..e1d5cf2f6 100644 --- a/diagnostics/forcing_feedback/forcing_feedback_plot.py +++ b/diagnostics/forcing_feedback/forcing_feedback_plot.py @@ -35,18 +35,18 @@ # Read in model results -nc_pl = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_Planck.nc") -nc_lr = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LapseRate.nc") -nc_lw_q = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_WaterVapor.nc") -nc_sw_q = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_WaterVapor.nc") -nc_alb = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SfcAlbedo.nc") -nc_lw_c = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_Cloud.nc") -nc_sw_c = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_Cloud.nc") -nc_lw_irf = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_IRF.nc") -nc_sw_irf = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_IRF.nc") -nc_lw_netrad = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_LW_Rad.nc") -nc_sw_netrad = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_SW_Rad.nc") -nc_strat = xr.open_dataset(os.environ["WK_DIR"] + "/model/netCDF/fluxanom2D_StratFB.nc") +nc_pl = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_Planck.nc") +nc_lr = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LapseRate.nc") +nc_lw_q = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_WaterVapor.nc") +nc_sw_q = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_WaterVapor.nc") +nc_alb = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SfcAlbedo.nc") +nc_lw_c = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_Cloud.nc") +nc_sw_c = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_Cloud.nc") +nc_lw_irf = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_LW_IRF.nc") +nc_sw_irf = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_IRF.nc") +nc_lw_netrad = xr.open_dataset(os.environ["WORK_DIR"] + 
"/model/netCDF/fluxanom2D_LW_Rad.nc") +nc_sw_netrad = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_SW_Rad.nc") +nc_strat = xr.open_dataset(os.environ["WORK_DIR"] + "/model/netCDF/fluxanom2D_StratFB.nc") lat_model = nc_sw_irf.lat.values weights_model = np.cos(np.deg2rad(lat_model)) @@ -147,7 +147,7 @@ xterms = ['', 'IRF', ''] ax2.set_xticks([r for r in range(len(xterms))], xterms) plt.tight_layout() -plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_CMIP6scatter.eps') +plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_CMIP6scatter.eps') plt.close() if np.max(nc_sw_irf.lon.values) >= 300: # convert 0-360 lon to -180-180 lon for plotting diff --git a/diagnostics/forcing_feedback/forcing_feedback_util.py b/diagnostics/forcing_feedback/forcing_feedback_util.py index 6396bb862..22c38817d 100644 --- a/diagnostics/forcing_feedback/forcing_feedback_util.py +++ b/diagnostics/forcing_feedback/forcing_feedback_util.py @@ -381,7 +381,7 @@ def fluxanom_nc_create(variable, lat, lon, fbname): """ var = xr.DataArray(variable, coords=[lat, lon], dims=['lat', 'lon'], name=fbname) - var.to_netcdf(os.environ['WK_DIR'] + '/model/netCDF/fluxanom2D_' + fbname + '.nc') + var.to_netcdf(os.environ['WORK_DIR'] + '/model/netCDF/fluxanom2D_' + fbname + '.nc') return None @@ -449,7 +449,7 @@ def bargraph_plotting(model_bar, obs_bar, var_units, var_legnames, var_filename) plt.ylabel(var_units) plt.xticks([r + barWidth for r in range(len(model_bar))], var_legnames) plt.legend(loc="upper right") - plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_globemean_' + var_filename + '.eps') + plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_globemean_' + var_filename + '.eps') plt.close() return None @@ -540,7 +540,7 @@ def map_plotting_4subs(cbar_levs1, cbar_levs2, var1_name, var1_model, \ if np.all(cbar_levs1 == cbar_levs2): cbar = plt.colorbar(cs, ax=axs.ravel(), orientation='horizontal', aspect=25) cbar.set_label(var_units) - plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_maps_' + \ + plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_maps_' + \ var_filename + '.eps', bbox_inches='tight') plt.close() @@ -593,7 +593,7 @@ def map_plotting_2subs(cbar_levs, var_name, var_model, cbar = plt.colorbar(cs, ax=axs.ravel(), orientation='horizontal', aspect=25) cbar.set_label(var_units) - plt.savefig(os.environ['WK_DIR'] + '/model/PS/forcing_feedback_maps_' + \ + plt.savefig(os.environ['WORK_DIR'] + '/model/PS/forcing_feedback_maps_' + \ var_filename + '.eps', bbox_inches='tight') plt.close() diff --git a/diagnostics/mixed_layer_depth/mixed_layer_depth.html b/diagnostics/mixed_layer_depth/mixed_layer_depth.html index be95dcb97..ea0c97376 100644 --- a/diagnostics/mixed_layer_depth/mixed_layer_depth.html +++ b/diagnostics/mixed_layer_depth/mixed_layer_depth.html @@ -2,31 +2,33 @@ This file is the HTML output template for the example diagnostic and shows how you should describe and link to the plots your diagnostic generates. The framework will "fill in the blanks" corresponding to environment - variables and copy the file to . + variables and copy the file to . EPS or PS files saved by your diagnostic are converted by the framework into bitmaps in the following locations: - /model/PS/.eps -> /model/.png - /obs/PS/.eps -> /obs/.png + /model/PS/.eps -> /model/.png + /obs/PS/.eps -> /obs/.png so the target of each link only needs to be the relative path, eg. "model/.png". --> MDTF mixed layer depth

Mixed Layer Depth

-

This POD computes mixed layer depth from CMIP6 monthly temperature and salinity. Mixed layer depth computed from the EN4 reanalysis temperature and salinity is included to compare with models.

+

This POD computes mixed layer depth from CMIP6 monthly temperature and salinity. + Mixed layer depth computed from the EN4 reanalysis temperature and salinity is included to compare with models. +

-

These figures show the mixed layer depth climatology for each month. Note that the colorbar varies between subplots. Users may wish to modify this.

+

These figures show the mixed layer depth climatology for each month. Note that the colorbar varies between subplots. + Users may wish to modify this. +

The results from this POD are to appear in a paper being prepared for the MAPP team special issue.

-
Mean mixed layer depth +<font color=navy>Mean mixed layer depth MODEL OBS - -
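The HTML text above describes computing mixed layer depth from monthly temperature and salinity, and the settings.jsonc hunk below lists gsw among the POD's Python requirements. As a rough sketch of what such a calculation involves, here is a minimal, self-contained density-threshold version; the 0.03 kg m-3 criterion, the 10 m reference depth, and the function name are illustrative assumptions, not necessarily the method mixed_layer_depth.py implements.

# Hedged sketch: density-threshold mixed layer depth for a single profile,
# using numpy and the gsw (TEOS-10) package required by the POD. The
# 0.03 kg m-3 threshold against the 10 m density is an assumed criterion.
import numpy as np
import gsw

def mld_density_threshold(so, thetao, depth, lon, lat, dsigma=0.03, zref=10.0):
    # so: practical salinity (psu); thetao: potential temperature (degC);
    # depth: positive-down levels (m); lon/lat: profile location (degrees)
    p = gsw.p_from_z(-depth, lat)                # sea pressure (dbar) from depth
    sa = gsw.SA_from_SP(so, p, lon, lat)         # absolute salinity (g/kg)
    ct = gsw.CT_from_pt(sa, thetao)              # conservative temperature
    sigma0 = gsw.sigma0(sa, ct)                  # potential density anomaly (kg/m3)
    sigma_ref = np.interp(zref, depth, sigma0)   # density at the reference depth
    deeper = depth[sigma0 > sigma_ref + dsigma]  # levels denser than the threshold
    return deeper.min() if deeper.size else depth.max()

# Synthetic profile: warm, fresh surface layer over colder, saltier water
depth = np.array([5., 15., 30., 50., 80., 120.])
so = np.array([34.5, 34.5, 34.6, 34.8, 35.0, 35.1])
thetao = np.array([28.0, 27.9, 27.5, 24.0, 20.0, 16.0])
print(mld_density_threshold(so, thetao, depth, lon=-140.0, lat=0.0))  # ~30 m here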
diff --git a/diagnostics/mixed_layer_depth/mixed_layer_depth.py b/diagnostics/mixed_layer_depth/mixed_layer_depth.py index c20dc0385..6fa9b7236 100755 --- a/diagnostics/mixed_layer_depth/mixed_layer_depth.py +++ b/diagnostics/mixed_layer_depth/mixed_layer_depth.py @@ -83,7 +83,7 @@ DATADIR = os.environ["DATADIR"] OBS_DATA = os.environ["OBS_DATA"] -WK_DIR = os.environ["WK_DIR"] +WK_DIR = os.environ["WORK_DIR"] CASENAME = os.environ["CASENAME"] modelname = CASENAME @@ -240,6 +240,7 @@ def computemean(field=None, firstyr=2010, lastyr=2014): Returns ------- + field.mean """ firstyr = int(firstyr) @@ -254,7 +255,7 @@ def computemean(field=None, firstyr=2010, lastyr=2014): return field.mean(dim="year") -def monthlyplot(field, edgec=None, figfile=None, cmapname="PuBu_r", myname=modelname): +def monthlyplot(field, figfile=None, cmapname="PuBu_r", myname=modelname): fig = plt.figure(figsize=(12, 10)) cmap_c = cmapname @@ -283,6 +284,8 @@ def monthlyplot(field, edgec=None, figfile=None, cmapname="PuBu_r", myname=model plt.close() return # 1) Loading model data files: ############################################### + + print("DATADIR: ", DATADIR) print("OBS_DATA: ", OBS_DATA) print("---") @@ -290,10 +293,10 @@ def monthlyplot(field, edgec=None, figfile=None, cmapname="PuBu_r", myname=model so_var = "{so_var}".format(**os.environ) thetao_var = "{thetao_var}".format(**os.environ) -firstyr = "{FIRSTYR}".format(**os.environ) -lastyr = "{LASTYR}".format(**os.environ) +firstyr = "{startdate}".format(**os.environ) +lastyr = "{enddate}".format(**os.environ) -print("thetao_var, so_var, firstyr, lastyr: ", thetao_var, so_var, firstyr, lastyr) +print("thetao_var, so_var, startdate, enddate: ", thetao_var, so_var, firstyr, lastyr) input_file_so = DATADIR+"/mon/"+CASENAME+"."+so_var+".mon.nc" input_file_thetao = DATADIR+"/mon/"+CASENAME+"."+thetao_var+".mon.nc" diff --git a/diagnostics/mixed_layer_depth/settings.jsonc b/diagnostics/mixed_layer_depth/settings.jsonc index 6fb83fc26..1d7700cfa 100644 --- a/diagnostics/mixed_layer_depth/settings.jsonc +++ b/diagnostics/mixed_layer_depth/settings.jsonc @@ -2,15 +2,23 @@ "settings" : { "driver" : "mixed_layer_depth.py", "long_name" : "Mixed Layer Depth Calculation", - "realm" : "ocean", + "convention" : "cmip", "description" : "Compute Mixed Layer Depth Mean from 3D Ocean Variables", "runtime_requirements": { "python3": ["scipy","matplotlib","xarray","netCDF4","xesmf","dask","numpy","pandas","cartopy","gsw"] } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "depth", "units": "m", @@ -22,12 +30,14 @@ "varlist" : { "so": { "standard_name": "sea_water_salinity", + "realm": "ocean", "frequency" : "mon", "units": "psu", "dimensions": ["time", "lev", "lat", "lon"] }, "thetao": { "standard_name": "sea_water_potential_temperature", + "realm": "ocean", "units": "degC", "frequency" : "mon", "dimensions": ["time", "lev","lat", "lon"] diff --git a/diagnostics/ocn_surf_flux_diag/groupby_variables.py b/diagnostics/ocn_surf_flux_diag/groupby_variables.py index ac32c0adc..f89b2a5e4 100644 --- a/diagnostics/ocn_surf_flux_diag/groupby_variables.py +++ b/diagnostics/ocn_surf_flux_diag/groupby_variables.py @@ -4,9 +4,8 @@ from scipy import stats - - -def 
bin_2d(ds,bin1_var,bin2_var,target_var,bin1=10,bin2=10,stTconfint=0.99,bin1_range=None,bin2_range=None):
+def bin_2d(ds: xr.Dataset, bin1_var: str, bin2_var: str, target_var, bin1: int = 10, bin2: int = 10,
+           stTconfint: float = 0.99, bin1_range=None, bin2_range=None):
     """
     The function is written to bin the variable (target_var) in a xr.Dataset
     based on two other variables (bin1_var, bin2_var) in the same xr.Dataset.
@@ -44,57 +43,54 @@ def bin_2d(ds,bin1_var,bin2_var,target_var,bin1=10,bin2=10,stTconfint=0.99,bin1_
     """
     warnings.simplefilter("ignore")
-
-    if (bin1_range is not None) and (bin2_range is not None):
-        bin1_interval = np.linspace(np.min(bin1_range),np.max(bin1_range),bin1+1)
-        bin2_interval = np.linspace(np.min(bin2_range),np.max(bin2_range),bin2+1)
+    if bin1_range is not None and bin2_range is not None:
+        bin1_interval = np.linspace(np.min(bin1_range), np.max(bin1_range), bin1+1)
+        bin2_interval = np.linspace(np.min(bin2_range), np.max(bin2_range), bin2+1)
     else:
-        bin1_interval = np.linspace(ds[bin1_var].min(),ds[bin1_var].max(),bin1+1)
-        bin2_interval = np.linspace(ds[bin2_var].min(),ds[bin2_var].max(),bin2+1)
+        bin1_interval = np.linspace(ds[bin1_var].min(), ds[bin1_var].max(), bin1+1)
+        bin2_interval = np.linspace(ds[bin2_var].min(), ds[bin2_var].max(), bin2+1)

     bin1_val = np.convolve(bin1_interval, np.ones(2), 'valid') / 2.
     bin2_val = np.convolve(bin2_interval, np.ones(2), 'valid') / 2.

-    ds_bin = xr.Dataset()
+    ds_bin=xr.Dataset()

     for tvar in target_var:
-        bin_matrix = np.zeros([bin1,bin2])
-        std_matrix = np.zeros([bin1,bin2])
-        count_matrix = np.zeros([bin1,bin2])
+        bin_matrix = np.zeros([bin1, bin2])
+        std_matrix = np.zeros([bin1, bin2])
+        count_matrix = np.zeros([bin1, bin2])
         print(tvar)
         for nbin1 in range(bin1):
             for nbin2 in range(bin2):
                 da_temp = ds[tvar]\
-                    .where((ds[bin1_var]>=bin1_interval[nbin1])&
-                           (ds[bin1_var]<bin1_interval[nbin1+1])&
-                           (ds[bin2_var]>=bin2_interval[nbin2])&
-                           (ds[bin2_var]<bin2_interval[nbin2+1]),
+                    .where((ds[bin1_var] >= bin1_interval[nbin1])&
+                           (ds[bin1_var] < bin1_interval[nbin1+1])&
+                           (ds[bin2_var] >= bin2_interval[nbin2])&
+                           (ds[bin2_var] < bin2_interval[nbin2+1]),
                            drop=True)
-                bin_matrix[nbin1,nbin2] = da_temp.mean(skipna=True).values
-                std_matrix[nbin1,nbin2] = da_temp.std(skipna=True).values
-                count_matrix[nbin1,nbin2] = da_temp.count().values
+                bin_matrix[nbin1, nbin2] = da_temp.mean(skipna=True).values
+                std_matrix[nbin1, nbin2] = da_temp.std(skipna=True).values
+                count_matrix[nbin1, nbin2] = da_temp.count().values

         da_bin = xr.DataArray(bin_matrix, coords=[(bin1_var, bin1_val), (bin2_var, bin2_val)])
         da_std = xr.DataArray(std_matrix, coords=[(bin1_var, bin1_val), (bin2_var, bin2_val)])
         da_count = xr.DataArray(count_matrix, coords=[(bin1_var, bin1_val), (bin2_var, bin2_val)])

-        ### calculate confidence interval
+        # calculate confidence interval
         # calculate the error bar base on the number of standard error
         # the number related to dist.
percentage is derived base on Students's T # distribution da_dof = da_count-1 alpha = 1.0-stTconfint - da_nst = stats.t.ppf(1.0-(alpha/2.0),da_dof) # 2-side + da_nst = stats.t.ppf(1.0-(alpha/2.0), da_dof) # 2-side da_stderr = da_std/np.sqrt(da_count) da_conf = da_nst*da_stderr - - ds_bin[tvar] = da_bin + ds_bin[tvar]=da_bin ds_bin['%s_std'%tvar] = da_std ds_bin['%s_count'%tvar] = da_count - ds_bin['%s_conf_%0.2f'%(tvar,stTconfint)] = da_conf - + ds_bin['%s_conf_%0.2f'%(tvar, stTconfint)] = da_conf return ds_bin diff --git a/diagnostics/ocn_surf_flux_diag/model_read.py b/diagnostics/ocn_surf_flux_diag/model_read.py index 4f573a0bb..25c51c8c7 100644 --- a/diagnostics/ocn_surf_flux_diag/model_read.py +++ b/diagnostics/ocn_surf_flux_diag/model_read.py @@ -3,7 +3,8 @@ import metpy.calc from metpy.units import units -def regional_var(varlist, lon_lim, lat_lim, year_lim): + +def regional_var(varlist: list, lon_lim: list, lat_lim: list, year_lim: list): """ The model io for needed variables and crop based on the region and period of interest. @@ -46,26 +47,25 @@ def regional_var(varlist, lon_lim, lat_lim, year_lim): ds_atm_regional (xr.Dataset) : a xarray dataset including all variables """ - - for nvar,var in enumerate(varlist): + for nvar, var in enumerate(varlist): ds_temp = xr.open_mfdataset(varlist[nvar]) if nvar == 0: ds_atm = ds_temp.copy() else: - ds_atm = xr.merge([ds_atm,ds_temp],compat='override') + ds_atm = xr.merge([ds_atm, ds_temp] ,compat='override') ########################################################################### # cropping dataset ds_atm_regional = ((ds_atm).where( - (ds_atm.lon>=np.array(lon_lim).min())& - (ds_atm.lon<=np.array(lon_lim).max())& - (ds_atm.lat>=np.array(lat_lim).min())& - (ds_atm.lat<=np.array(lat_lim).max())& - (ds_atm['time.year']>=np.array(year_lim).min())& - (ds_atm['time.year']<=np.array(year_lim).max()), + (ds_atm.lon >= np.array(lon_lim).min())& + (ds_atm.lon <= np.array(lon_lim).max())& + (ds_atm.lat >= np.array(lat_lim).min())& + (ds_atm.lat <= np.array(lat_lim).max())& + (ds_atm['time.year'] >= np.array(year_lim).min())& + (ds_atm['time.year'] <= np.array(year_lim).max()), drop=True) ) @@ -82,9 +82,9 @@ def regional_var(varlist, lon_lim, lat_lim, year_lim): da_q_surf = ds_atm_regional['huss'].copy()*np.nan mixing_ratio_surf = (metpy.calc.saturation_mixing_ratio( - ds_atm_regional['psl'].values*units.Pa, - ds_atm_regional['ts'].values*units.K) - ) + ds_atm_regional['psl'].values*units.Pa, + ds_atm_regional['ts'].values*units.K) + ) q_surf = metpy.calc.specific_humidity_from_mixing_ratio(mixing_ratio_surf) # unit for mixing ratio and specific humidity is kg/kg diff --git a/diagnostics/ocn_surf_flux_diag/obs_data_read.py b/diagnostics/ocn_surf_flux_diag/obs_data_read.py index 872808812..58c4fc788 100644 --- a/diagnostics/ocn_surf_flux_diag/obs_data_read.py +++ b/diagnostics/ocn_surf_flux_diag/obs_data_read.py @@ -5,7 +5,7 @@ from metpy.units import units -def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): +def tao_triton(obs_data_dir, lon_lim: list, lat_lim: list, year_lim=None): """ The observational data io for latent heat flux corrections. 
If there are more needed model variables, a manual @@ -47,19 +47,18 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): """ # original variable name for dir name - dirlist = ['WindSpeed10m','SST','RH','Latent','airT','SLP'] + dirlist = ['WindSpeed10m', 'SST', 'RH', 'Latent', 'airT', 'SLP'] # original variable name for file name - varlist = ['wzs','sst','rh','qlat','airt','bp'] + varlist = ['wzs', 'sst', 'rh', 'qlat', 'airt', 'bp'] # original variable name in netcdf - varname = ['WZS_2401','T_25','RH_910','QL_137','AT_21','BP_915'] + varname = ['WZS_2401', 'T_25', 'RH_910', 'QL_137', 'AT_21', 'BP_915'] var_file_dict = {} - for nvar,var in enumerate(varlist): + for nvar, var in enumerate(varlist): var_file = [] - location = [] - files = os.listdir(os.path.join(obs_data_dir,dirlist[nvar],'daily')) + files = os.listdir(os.path.join(obs_data_dir, dirlist[nvar], 'daily')) for file in files: if file.startswith(var) and file.endswith('_dy.cdf'): startlen = len(var) @@ -67,7 +66,7 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): loc = file[startlen:-endlen] lat_index = loc.find('n') - if lat_index == -1 : + if lat_index == -1: lat_index = loc.find('s') # latitude @@ -88,9 +87,8 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): var_file.append(loc) var_file_dict[var] = var_file - # pick only overlapping stations - for nvar,var in enumerate(varlist): + for nvar, var in enumerate(varlist): if nvar == 0: final_list = var_file_dict[var] else: @@ -102,10 +100,10 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): print('TAO/TRITON stations:') for stn_loc in stn_locs: data_files = [] - for nvar,var in enumerate(varlist): - path = os.path.join(obs_data_dir,dirlist[nvar],'daily') + for nvar, var in enumerate(varlist): + path = os.path.join(obs_data_dir, dirlist[nvar], 'daily') file = var+stn_loc+'_dy.cdf' - data_files.append(os.path.join(path,file)) + data_files.append(os.path.join(path, file)) try : ds_list = [xr.open_dataset(file) for file in data_files] ds_mlist[stn_loc] = ds_list @@ -113,14 +111,11 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): except FileNotFoundError: print('%s not enough data'%stn_loc) - - - ### clean fill_value + # clean fill_value for stn_loc in stn_locs: - for nvar,var in enumerate(varname): + for nvar, var in enumerate(varname): ds_mlist[stn_loc][nvar][var] = ds_mlist[stn_loc][nvar][var]\ - .where(ds_mlist[stn_loc][nvar][var] != 1e35,other=np.nan) - + .where(ds_mlist[stn_loc][nvar][var] != 1e35, other=np.nan) # # Calculate $\Delta$q # @@ -133,16 +128,14 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): # slp to determine the mix ratio and then the specific humidity # - for stn_loc in stn_locs: temp_list = [ds_mlist[stn_loc][nvar][varname[nvar]].squeeze() for nvar in range(len(varname))] - ds_merge = xr.merge(temp_list,compat='override') + ds_merge = xr.merge(temp_list, compat='override') for nvar in range(len(varlist)): ds_merge = ds_merge.rename_vars({varname[nvar]:varlist[nvar]}) - # calculate 2m specific humidity mixing_ratio = metpy.calc.mixing_ratio_from_relative_humidity( ds_merge['bp'].values*units.hPa, @@ -178,36 +171,34 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): # crop time period if year_lim is not None: - for nstn,stn_loc in enumerate(stn_locs): + for nstn, stn_loc in enumerate(stn_locs): ds_mlist[stn_loc] = ds_mlist[stn_loc].where( - (ds_mlist[stn_loc]['time.year']>=np.array(year_lim).min())& - (ds_mlist[stn_loc]['time.year']<=np.array(year_lim).max()), + 
(ds_mlist[stn_loc]['time.year'] >= np.array(year_lim).min())& + (ds_mlist[stn_loc]['time.year'] <= np.array(year_lim).max()), drop=True) - - # stack all station - for nstn,stn_loc in enumerate(stn_locs): + for nstn, stn_loc in enumerate(stn_locs): if nstn == 0: ds_merge = ds_mlist[stn_loc] else: - ds_merge=xr.concat([ds_merge,ds_mlist[stn_loc]],dim='stn') + ds_merge = xr.concat([ds_merge, ds_mlist[stn_loc]], dim='stn') if len(stn_locs) == 1: ds_merge = ds_merge else: - ds_merge = ds_merge.stack(allstn=('stn','time')) + ds_merge = ds_merge.stack(allstn=('stn', 'time')) location = [] for loc in stn_locs: lat_index = loc.find('n') - if lat_index == -1 : + if lat_index == -1: lat_index = loc.find('s') # latitude if loc[lat_index] in ['n']: lat_loc = float(loc[:lat_index]) - else : + else: lat_loc = -float(loc[:lat_index]) # longitude @@ -216,19 +207,18 @@ def tao_triton(obs_data_dir,lon_lim,lat_lim,year_lim=None): else: lon_loc = -float(loc[lat_index+1:-1])+360. - location.append([lon_loc,lat_loc]) + location.append([lon_loc, lat_loc]) # change varname to be consistent with model - ds_merge = ds_merge.rename({'wzs':'sfcWind','dQ':'del_q','qlat':'hfls'}) + ds_merge = ds_merge.rename({'wzs': 'sfcWind', 'dQ': 'del_q', 'qlat': 'hfls'}) # change dq unit from kg/kg to g/kg ds_merge['del_q'] = ds_merge['del_q']*1e3 - return ds_merge,location + return ds_merge, location - -def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): +def rama(obs_data_dir: str, lon_lim: list, lat_lim: list, year_lim=None): """ The observational data io for latent heat flux corrections. If there are more needed model variables, a manual @@ -270,19 +260,18 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): """ # original variable name for dir name - dirlist = ['WindSpeed10m','SST','RH','Latent','airT','SLP'] + dirlist = ['WindSpeed10m', 'SST', 'RH', 'Latent', 'airT', 'SLP'] # original variable name for file name - varlist = ['wzs','sst','rh','qlat','airt','bp'] + varlist = ['wzs', 'sst', 'rh', 'qlat', 'airt', 'bp'] # original variable name in netcdf - varname = ['WZS_2401','T_25','RH_910','QL_137','AT_21','BP_915'] + varname = ['WZS_2401', 'T_25', 'RH_910', 'QL_137', 'AT_21', 'BP_915'] var_file_dict = {} - for nvar,var in enumerate(varlist): + for nvar, var in enumerate(varlist): var_file = [] - location = [] - files = os.listdir(os.path.join(obs_data_dir,dirlist[nvar],'daily')) + files = os.listdir(os.path.join(obs_data_dir, dirlist[nvar], 'daily')) for file in files: if file.startswith(var) and file.endswith('_dy.cdf'): startlen = len(var) @@ -290,7 +279,7 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): loc = file[startlen:-endlen] lat_index = loc.find('n') - if lat_index == -1 : + if lat_index == -1: lat_index = loc.find('s') # latitude @@ -311,9 +300,8 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): var_file.append(loc) var_file_dict[var] = var_file - # pick only overlapping stations - for nvar,var in enumerate(varlist): + for nvar, var in enumerate(varlist): if nvar == 0: final_list = var_file_dict[var] else: @@ -325,26 +313,24 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): print('RAMA stations:') for stn_loc in stn_locs: data_files = [] - for nvar,var in enumerate(varlist): - path = os.path.join(obs_data_dir,dirlist[nvar],'daily') - file = var+stn_loc+'_dy.cdf' - data_files.append(os.path.join(path,file)) + for nvar, var in enumerate(varlist): + path = os.path.join(obs_data_dir, dirlist[nvar], 'daily') + file = var + stn_loc + '_dy.cdf' + data_files.append(os.path.join(path, file)) try : 
ds_list = [xr.open_dataset(file) for file in data_files] ds_mlist[stn_loc] = ds_list print("%s included"%stn_loc) except FileNotFoundError: - print('%s not enough data'%stn_loc) - + print('%s not enough data' % stn_loc) - ### clean fill_value + # clean fill_value for stn_loc in stn_locs: - for nvar,var in enumerate(varname): + for nvar, var in enumerate(varname): ds_mlist[stn_loc][nvar][var] = ds_mlist[stn_loc][nvar][var]\ - .where(ds_mlist[stn_loc][nvar][var] != 1e35,other=np.nan) - + .where(ds_mlist[stn_loc][nvar][var] != 1e35, other=np.nan) - # # Calculate $\Delta$q + # Calculate $\Delta$q # # $\delta$ q is the specific humidity difference between # saturation q near surface determined by SST and 2m(3m) q. @@ -355,16 +341,14 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): # slp to determine the mix ratio and then the specific humidity # - for stn_loc in stn_locs: temp_list = [ds_mlist[stn_loc][nvar][varname[nvar]].squeeze() for nvar in range(len(varname))] - ds_merge = xr.merge(temp_list,compat='override') + ds_merge = xr.merge(temp_list, compat='override') for nvar in range(len(varlist)): ds_merge = ds_merge.rename_vars({varname[nvar]:varlist[nvar]}) - # calculate 2m specific humidity mixing_ratio = metpy.calc.mixing_ratio_from_relative_humidity( ds_merge['bp'].values*units.hPa, @@ -400,32 +384,30 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): # crop time period if year_lim is not None: - for nstn,stn_loc in enumerate(stn_locs): + for nstn, stn_loc in enumerate(stn_locs): ds_mlist[stn_loc] = ds_mlist[stn_loc].where( - (ds_mlist[stn_loc]['time.year']>=np.array(year_lim).min())& - (ds_mlist[stn_loc]['time.year']<=np.array(year_lim).max()), + (ds_mlist[stn_loc]['time.year'] >= np.array(year_lim).min()) & + (ds_mlist[stn_loc]['time.year'] <= np.array(year_lim).max()), drop=True) - - # stack all station - for nstn,stn_loc in enumerate(stn_locs): + for nstn, stn_loc in enumerate(stn_locs): if nstn == 0: ds_merge = ds_mlist[stn_loc] else: - ds_merge=xr.concat([ds_merge,ds_mlist[stn_loc]],dim='stn') - ds_merge = ds_merge.stack(allstn=('stn','time')) + ds_merge=xr.concat([ds_merge, ds_mlist[stn_loc]], dim='stn') + ds_merge = ds_merge.stack(allstn=('stn', 'time')) location = [] for loc in stn_locs: lat_index = loc.find('n') - if lat_index == -1 : + if lat_index == -1: lat_index = loc.find('s') # latitude if loc[lat_index] in ['n']: lat_loc = float(loc[:lat_index]) - else : + else: lat_loc = -float(loc[:lat_index]) # longitude @@ -434,13 +416,12 @@ def rama(obs_data_dir,lon_lim,lat_lim,year_lim=None): else: lon_loc = -float(loc[lat_index+1:-1])+360. - location.append([lon_loc,lat_loc]) + location.append([lon_loc, lat_loc]) # change varname to be consistent with model - ds_merge = ds_merge.rename({'wzs':'sfcWind','dQ':'del_q','qlat':'hfls'}) + ds_merge = ds_merge.rename({'wzs': 'sfcWind', 'dQ': 'del_q', 'qlat': 'hfls'}) # change dq unit from kg/kg to g/kg ds_merge['del_q'] = ds_merge['del_q']*1e3 - - - return ds_merge,location + + return ds_merge, location diff --git a/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.html b/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.html index 5d9e3499a..a0f6ff811 100644 --- a/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.html +++ b/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.html @@ -31,7 +31,7 @@

Example diagnostic: ocean surface flux diagnostic

- diff --git a/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.py b/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.py index f93f1502b..b4e914326 100644 --- a/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.py +++ b/diagnostics/ocn_surf_flux_diag/ocn_surf_flux_diag.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # coding: utf-8 -''' # MDTF ocean surface flux diagnostic - +""" The script generate the ocean surface latent heat flux diagnostic that depicts how latent heat fluxes vary with the near-surface specific humidity vertical gradient (dq) and surface wind speed (|V|) @@ -46,24 +45,23 @@ bin_2d : binned the variable/list of variables based on surface wind and near-surface water vapor vertical gradient (dq) - -''' +""" import os import warnings import xarray as xr import numpy as np import matplotlib.pyplot as plt from model_read import regional_var -from obs_data_read import tao_triton,rama +from obs_data_read import tao_triton, rama from groupby_variables import bin_2d warnings.simplefilter("ignore") -################## Define User setting ############# +# Define User setting ############# # Read framework variables (pod_env_vars) -lon_lim = [float(os.getenv("lon_min")),float(os.getenv("lon_max"))] -lat_lim = [float(os.getenv("lat_min")),float(os.getenv("lat_max"))] -year_lim = [float(os.getenv("FIRSTYR")),float(os.getenv("LASTYR"))] +lon_lim = [float(os.getenv("lon_min")), float(os.getenv("lon_max"))] +lat_lim = [float(os.getenv("lat_min")), float(os.getenv("lat_max"))] +year_lim = [float(os.getenv("startdate")), float(os.getenv("enddate"))] print("============== MDTF ocean surface flux diagnostic =============") print("User setting variables:") @@ -84,7 +82,7 @@ obs_taotriton_dir = '%s/tao_triton/'%os.getenv("OBS_DATA") obs_rama_dir = '%s/rama/'%os.getenv("OBS_DATA") -################## Main script start ############# +# Main script start ############# # Read model print("===========================") print('Reading model data...') @@ -96,10 +94,10 @@ # Read tao/triton print("===========================") print('Reading obs data (mooring from)...') -ds_tao,location_pac = tao_triton(obs_taotriton_dir, - lon_lim, - lat_lim, - year_lim) +ds_tao, location_pac = tao_triton(obs_taotriton_dir, + lon_lim, + lat_lim, + year_lim) # Read rama ds_rama,location_ind = rama(obs_rama_dir, @@ -109,7 +107,7 @@ # combine two observational data location = location_pac + location_ind -ds_stn = xr.concat([ds_tao,ds_rama],dim='allstn') +ds_stn = xr.concat([ds_tao, ds_rama], dim='allstn') # # Binning the latent heat flux @@ -124,62 +122,60 @@ print("===========================") print('Binning mooring variables including...') ds_stn_bin = bin_2d(ds_stn, - 'sfcWind','del_q',['hfls'], - bin1=nbin1,bin2=nbin2,stTconfint=stt, - bin1_range=bin1_range, - bin2_range=bin2_range) + 'sfcWind', 'del_q', ['hfls'], + bin1=nbin1,bin2=nbin2,stTconfint=stt, + bin1_range=bin1_range, + bin2_range=bin2_range) print("===========================") print('Binning model variables including...') ds_model_bin = bin_2d(ds_model.load(), - 'sfcWind','del_q',['hfls','pr'], - bin1=nbin1,bin2=nbin2,stTconfint=stt, - bin1_range=bin1_range, - bin2_range=bin2_range) + 'sfcWind', 'del_q', ['hfls', 'pr'], + bin1=nbin1,bin2=nbin2, stTconfint=stt, + bin1_range=bin1_range, + bin2_range=bin2_range) # output binned result in netCDF -(ds_model_bin.to_netcdf(os.getenv("WK_DIR") - + "/model/netCDF/model_binned.nc")) +(ds_model_bin.to_netcdf(os.getenv("WORK_DIR") + "/model/netCDF/model_binned.nc")) -(ds_stn_bin.to_netcdf(os.getenv("WK_DIR") - + 
"/obs/netCDF/obs_binned.nc")) +(ds_stn_bin.to_netcdf(os.getenv("WORK_DIR") + "/obs/netCDF/obs_binned.nc")) -#################### plotting ################### +# plotting ################### # Unit change for precip model rho_w = 1000. # kg/m^3 m2mm = 1000. # mm/m s2day = 60.*60.*24. # s/day pr_factor_model = 1./rho_w*m2mm*s2day # kg/m^2/s => mm/day -fig = plt.figure(1,figsize=(5,5)) +fig = plt.figure(1, figsize=(5,5)) -qlat_level = np.linspace(0,400,11) -dqlat_level = np.linspace(-50,50,11) -qlatratio_level = np.linspace(-0.2,0.2,11) -ratiolevel = np.linspace(0.01,0.01,1) -pr_level = np.array([-0.1,5,10]) +qlat_level = np.linspace(0, 400, 11) +dqlat_level = np.linspace(-50, 50, 11) +qlatratio_level = np.linspace(-0.2, 0.2, 11) +ratiolevel = np.linspace(0.01, 0.01, 1) +pr_level = np.array([-0.1, 5, 10]) # ======== hlfs obs ========= -ax1 = fig.add_axes([0,0,1,0.8]) +ax1 = fig.add_axes([0, 0, 1, 0.8]) im = ds_stn_bin['hfls'].plot.pcolormesh(x='sfcWind', - y='del_q', - ax=ax1, - levels=qlat_level, - extend='both', - cmap='plasma_r',) + y='del_q', + ax=ax1, + levels=qlat_level, + extend='both', + cmap='plasma_r') cb=im.colorbar cb.remove() -cs = ((ds_stn_bin['hfls_count']/ds_stn_bin['hfls_count'].sum()) - .plot.contour(x='sfcWind', - y='del_q', - ax=ax1, - levels=ratiolevel, - colors='w',linewidths=6) - ) +cs = ((ds_stn_bin['hfls_count']/ds_stn_bin['hfls_count'].sum()).plot.contour(x='sfcWind', + y='del_q', + ax=ax1, + levels=ratiolevel, + colors='w', + linewidths=6) + ) # verify that plotted data falls within defined ratiolevel range before adding labels # (useful when working with synthetic data) @@ -187,31 +183,31 @@ ax1.clabel(cs, ratiolevel, inline=True, fmt='%0.2f', fontsize=10) # ========== hlfs model =========== -ax2 = fig.add_axes([1,0,1,0.8]) +ax2 = fig.add_axes([1, 0, 1, 0.8]) im = ds_model_bin['hfls'].plot.pcolormesh(x='sfcWind', y='del_q', ax=ax2, levels=qlat_level, extend='both', - cmap='plasma_r',) + cmap='plasma_r') cb=im.colorbar cb.remove() -cbaxes=fig.add_axes([1.9,0,0.02,0.8]) -cbar=fig.colorbar(im,cax=cbaxes,orientation='vertical') +cbaxes=fig.add_axes([1.9, 0, 0.02, 0.8]) +cbar=fig.colorbar(im, cax=cbaxes, orientation='vertical') cbar.set_ticks(qlat_level) cbar.set_ticklabels(["%0.0f"%(n) for n in qlat_level]) -cbar.ax.tick_params(labelsize=15,rotation=0) +cbar.ax.tick_params(labelsize=15, rotation=0) cbar.set_label(label='Latent heat flux ($W/m^2$)', size=15, labelpad=15) -cs = ((ds_model_bin['hfls_count']/ds_model_bin['hfls_count'].sum()) - .plot.contour(x='sfcWind', - y='del_q', - ax=ax2, - levels=ratiolevel, - colors='w',linewidths=6) - ) +cs = ((ds_model_bin['hfls_count']/ds_model_bin['hfls_count'].sum()).plot.contour(x='sfcWind', + y='del_q', + ax=ax2, + levels=ratiolevel, + colors='w', + linewidths=6) + ) # verify that plotted data falls within defined ratiolevel range before adding labels # (useful when working with synthetic data) @@ -219,25 +215,25 @@ ax2.clabel(cs, ratiolevel, inline=True, fmt='%0.2f', fontsize=10) -ax1.set_yticks(np.arange(bin2_range[0],bin2_range[1]+1,2)) -ax1.set_xticks(np.arange(bin1_range[0],bin1_range[1]+1,2)) -ax1.set_xlim([bin1_range[0],bin1_range[1]]) -ax1.set_ylim([bin2_range[0],bin2_range[1]]) -ax1.tick_params(axis='y',labelsize=15,length=5,width=1) -ax1.tick_params(axis='x',labelsize=15,length=5,width=1) -ax1.set_ylabel('$\Delta$q ($g/kg$)',size=15) -ax1.set_xlabel('10m wind speed ($m/s$)',size=15) -ax1.set_title('Obs',color='black', weight='bold',size=22) - -ax2.set_yticks(np.arange(bin2_range[0],bin2_range[1]+1,2)) 
-ax2.set_xticks(np.arange(bin1_range[0],bin1_range[1]+1,2)) -ax2.set_xlim([bin1_range[0],bin1_range[1]]) -ax2.set_ylim([bin2_range[0],bin2_range[1]]) -ax2.tick_params(axis='y',labelsize=15,length=5,width=1) -ax2.tick_params(axis='x',labelsize=15,length=5,width=1) -ax2.set_ylabel('',size=15) -ax2.set_xlabel('10m wind speed ($m/s$)',size=15) -ax2.set_title('Model',color='black', weight='bold',size=22) +ax1.set_yticks(np.arange(bin2_range[0], bin2_range[1]+1, 2)) +ax1.set_xticks(np.arange(bin1_range[0], bin1_range[1]+1, 2)) +ax1.set_xlim([bin1_range[0], bin1_range[1]]) +ax1.set_ylim([bin2_range[0], bin2_range[1]]) +ax1.tick_params(axis='y', labelsize=15, length=5, width=1) +ax1.tick_params(axis='x', labelsize=15, length=5, width=1) +ax1.set_ylabel('$\Delta$q ($g/kg$)', size=15) +ax1.set_xlabel('10m wind speed ($m/s$)', size=15) +ax1.set_title('Obs', color='black', weight='bold', size=22) + +ax2.set_yticks(np.arange(bin2_range[0], bin2_range[1]+1, 2)) +ax2.set_xticks(np.arange(bin1_range[0], bin1_range[1]+1, 2)) +ax2.set_xlim([bin1_range[0], bin1_range[1]]) +ax2.set_ylim([bin2_range[0], bin2_range[1]]) +ax2.tick_params(axis='y', labelsize=15, length=5, width=1) +ax2.tick_params(axis='x', labelsize=15, length=5, width=1) +ax2.set_ylabel('', size=15) +ax2.set_xlabel('10m wind speed ($m/s$)', size=15) +ax2.set_title('Model', color='black', weight='bold', size=22) xps = (np.array(bin1_range).min()) yps = np.array(bin2_range).max()+2 @@ -245,83 +241,81 @@ ################################################################ -dx=0.3 +dx = 0.3 ds_modelbias_bin = ds_model_bin-ds_stn_bin -ds_modelbias_bin_conf = np.sqrt(ds_model_bin**2+ds_stn_bin**2) +ds_modelbias_bin_conf = np.sqrt(ds_model_bin**2+ds_stn_bin**2) ds_modelbias_bin_ratio = (ds_model_bin-ds_stn_bin)/ds_model_bin - # ========== hfls bias =========== -ax2 = fig.add_axes([1*2+dx,0,1,0.8]) +ax2 = fig.add_axes([1*2+dx, 0, 1, 0.8]) im = ds_modelbias_bin['hfls'].plot.pcolormesh(x='sfcWind', - y='del_q', - ax=ax2, - levels=dqlat_level, - extend='both', - cmap='RdBu_r',) + y='del_q', + ax=ax2, + levels=dqlat_level, + extend='both', + cmap='RdBu_r') cb=im.colorbar cb.remove() im2 = (ds_model_bin['pr']*pr_factor_model).plot.contour(x='sfcWind', - y='del_q', - ax=ax2, - levels=pr_level, - extend='max', - cmap='summer',linewidths=4) + y='del_q', + ax=ax2, + levels=pr_level, + extend='max', + cmap='summer', + linewidths=4 + ) im2.clabel(pr_level, inline=True, fmt='%0.2f', fontsize=10) -cbaxes=fig.add_axes([1*3+dx-0.1,0,0.02,0.8]) -cbar=fig.colorbar(im,cax=cbaxes,orientation='vertical') +cbaxes=fig.add_axes([1*3+dx-0.1, 0, 0.02, 0.8]) +cbar=fig.colorbar(im, cax=cbaxes, orientation='vertical') cbar.set_ticks(dqlat_level) cbar.set_ticklabels(["%0.0f"%(n) for n in dqlat_level]) -cbar.ax.tick_params(labelsize=15,rotation=0) +cbar.ax.tick_params(labelsize=15, rotation=0) cbar.set_label(label='Latent heat flux bias ($W/m^2$)', size=15, labelpad=15) -biases_conf = (ds_modelbias_bin_conf['hfls_conf_%0.2f'%(stt)] - .where(ds_modelbias_bin_conf['hfls_conf_%0.2f'%(stt)] - Precipitation Buoyancy Statistics
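The ocn_surf_flux_diag driver above bins latent heat flux with bin_2d and reads back hfls, hfls_count, and hfls_conf_0.99, where the confidence variable is the Student-t half-width t(1 - alpha/2, n - 1) * s / sqrt(n) computed in groupby_variables.py. A usage sketch on synthetic data follows; the bulk-formula-like flux, the bin counts, and the ranges are invented for illustration, and the import assumes the script runs from the POD directory.

# Hedged usage sketch for the refactored bin_2d, on synthetic data.
# Variable names follow the driver above; values are illustrative only.
import numpy as np
import xarray as xr
from groupby_variables import bin_2d

rng = np.random.default_rng(0)
n = 5000
wind = rng.uniform(0., 15., n)                    # 10 m wind speed (m/s)
dq = rng.uniform(0., 10., n)                      # near-surface dq (g/kg)
hfls = 10. + 1.5*wind*dq + rng.normal(0., 5., n)  # bulk-formula-like flux (W/m2)
ds = xr.Dataset({'sfcWind': ('obs', wind),
                 'del_q': ('obs', dq),
                 'hfls': ('obs', hfls)})

ds_bin = bin_2d(ds, 'sfcWind', 'del_q', ['hfls'],
                bin1=15, bin2=10, stTconfint=0.99,
                bin1_range=[0, 15], bin2_range=[0, 10])
print(ds_bin['hfls'])            # binned-mean flux on (sfcWind, del_q)
print(ds_bin['hfls_conf_0.99'])  # Student-t 99% half-width per bin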

The precipitation-buoyancy diagnostics POD evaluates the thermodynamic sensitivity of model
-precipitation. This POD relates precipitation to a measure of lower-tropospheric averaged buoyancy
+ (BL) and its two components:
a measure of lower-tropospheric Convective Available Potential Energy (CAPEL) and lower-tropospheric
subsaturation (SUBSATL). The observational baseline for the comparison is constructed using ERA5
@@ -17,7 +18,7 @@

Precipitation Buoyancy Statistics

collection of CMIP6 models with respect to measures of thermodynamic sensitivity. -
-
Time period: {{FIRSTYR}}-{{LASTYR}} +<font color=navy>Time period: {{startdate}}-{{enddate}} {{CASENAME}} OBS
Precipitation Buoyancy Statistics for {{CASENAME}} +<font color=navy> Precipitation Buoyancy Statistics for {{CASENAME}}
Precipitation-buoyancy curve   plot diff --git a/diagnostics/precip_buoy_diag/precip_buoy_diag.py b/diagnostics/precip_buoy_diag/precip_buoy_diag.py index 97fcb9dbf..f85be284f 100644 --- a/diagnostics/precip_buoy_diag/precip_buoy_diag.py +++ b/diagnostics/precip_buoy_diag/precip_buoy_diag.py @@ -1,4 +1,4 @@ -''' +""" This file is part of the precip_buoy_diag module of the MDTF code package (see mdtf/MDTF-diagnostics/LICENSE.txt). @@ -11,71 +11,70 @@ LAST EDIT: REFERENCES: - -''' +""" # Import standard Python packages import os import subprocess from precip_buoy_diag_util import precipbuoy -### Set environment variables pointing to pr, hus, ta and ps. -### Once path variables from settings.jsonc are available, this step is redundant. +# Set environment variables pointing to pr, hus, ta and ps. +# Once path variables from settings.jsonc are available, this step is redundant. os.environ["ta_file"] = "{DATADIR}/1hr/{CASENAME}.{ta_var}.1hr.nc".format(**os.environ) os.environ["hus_file"] = "{DATADIR}/1hr/{CASENAME}.{qa_var}.1hr.nc".format(**os.environ) os.environ["pr_file"] = "{DATADIR}/1hr/{CASENAME}.{pr_var}.1hr.nc".format(**os.environ) os.environ["ps_file"] = "{DATADIR}/1hr/{CASENAME}.{ps_var}.1hr.nc".format(**os.environ) -### This POD produces intermediate files that are worth saving. -### Here we specify the save directory. -### Can include option to obtain this from settings.jsonc. -os.environ["temp_dir"] =os.environ["WK_DIR"]+'/model' +# This POD produces intermediate files that are worth saving. +# Here we specify the save directory. +# Can include option to obtain this from settings.jsonc. +os.environ["temp_dir"] = os.environ["WORK_DIR"]+'/model' os.environ["temp_file"] = "{temp_dir}/{CASENAME}.buoy_var.1hr.nc".format(**os.environ) -os.environ["binned_output"]="{WK_DIR}/obs/{CASENAME}.binnedpcp.1hr.nc".format(**os.environ) +os.environ["binned_output"] = "{WORK_DIR}/obs/{CASENAME}.binnedpcp.1hr.nc".format(**os.environ) -### Read obs. files for plotting -OBS_FILE_NAME='trmm3B42_era5_2002_2014.convecTransLev2.nc' -CMIP6_FILE_NAME='ERA5_CMIP6_gamma_properties.nc' +# Read obs. files for plotting +OBS_FILE_NAME = 'trmm3B42_era5_2002_2014.convecTransLev2.nc' +CMIP6_FILE_NAME = 'ERA5_CMIP6_gamma_properties.nc' -os.environ["binned_obs"]="{OBS_DATA}/".format(**os.environ)+OBS_FILE_NAME -os.environ["cmip6_output"]="{OBS_DATA}/".format(**os.environ)+CMIP6_FILE_NAME +os.environ["binned_obs"] = "{OBS_DATA}/".format(**os.environ) + OBS_FILE_NAME +os.environ["cmip6_output"] = "{OBS_DATA}/".format(**os.environ) + CMIP6_FILE_NAME -### Obtain the location of the region mask ### -### Another flexible option to read in from user -os.environ["region_mask"]=os.environ["WK_DIR"]+'/model/'+'region_0.25x0.25_costal2.5degExcluded.mat' +# Obtain the location of the region mask ### +# Another flexible option to read in from user +os.environ["region_mask"] = os.environ["WORK_DIR"] + '/model/' + 'region_0.25x0.25_costal2.5degExcluded.mat' -### A cython executable must be created. -### First delete any existing builds ### +# A cython executable must be created. 
+# First delete any existing builds ### try: - os.remove(os.environ["POD_HOME"]+'/*.c') - os.remove(os.environ["POD_HOME"]+'/*.so') -except Exception: + os.remove(os.environ["POD_HOME"] + '/*.c') + os.remove(os.environ["POD_HOME"] + '/*.so') +except Exception as exc: + print(exc) pass -### Compiling cython +# Compiling cython try: build_cython=subprocess.run(['python', - os.environ["POD_HOME"]+"/precip_buoy_diag_setup_cython.py", - 'build_ext','--build-lib='+os.environ['POD_HOME']],check=True) - if (build_cython.returncode)==0: + os.environ["POD_HOME"] + "/precip_buoy_diag_setup_cython.py", 'build_ext', + '--build-lib=' + os.environ['POD_HOME']], check=True) + if build_cython.returncode == 0: print('>>>>>>>Successfully compiled cython file') except subprocess.CalledProcessError as err: - print ("PODError > ",err.output) - print ("PODError > ",err.stderr) + print("PODError > ", err.output) + print("PODError > ", err.stderr) -## Calling POD ### +# Calling POD ### -### initialize pod ### -pb_pod=precipbuoy() +# initialize pod ### +pb_pod = precipbuoy() if pb_pod.binned: print('BINNED OUTPUT AVAILABLE. MOVING ONTO PLOTTING...') pb_pod.plot() - else: print('BINNED OUTPUT UNAVAILABLE. CHECKING FOR PREPROCESSED FILES') - ### Check if pre-processed files are available. + # Check if pre-processed files are available. if pb_pod.preprocessed: print('PREPROCESSED FILES AVAILABLE. MOVING ONTO BINNING...') diff --git a/diagnostics/precip_buoy_diag/precip_buoy_diag_setup_cython.py b/diagnostics/precip_buoy_diag/precip_buoy_diag_setup_cython.py index 5e78f40c3..580e5a8b2 100644 --- a/diagnostics/precip_buoy_diag/precip_buoy_diag_setup_cython.py +++ b/diagnostics/precip_buoy_diag/precip_buoy_diag_setup_cython.py @@ -4,7 +4,7 @@ import os setup( - ext_modules = cythonize(os.environ['POD_HOME']+"/*.pyx"), + ext_modules=cythonize(os.environ['POD_HOME'] + "/*.pyx"), include_dirs=[np.get_include()] ) diff --git a/diagnostics/precip_buoy_diag/precip_buoy_diag_util.py b/diagnostics/precip_buoy_diag/precip_buoy_diag_util.py index 369d19955..c58b91a28 100644 --- a/diagnostics/precip_buoy_diag/precip_buoy_diag_util.py +++ b/diagnostics/precip_buoy_diag/precip_buoy_diag_util.py @@ -1,4 +1,4 @@ -''' +""" This file is part of the precip_buoy_diag module of the MDTF code package (see mdtf/MDTF-diagnostics/LICENSE.txt). 
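As context for the build step above: the driver compiles the POD's .pyx sources at run time with the standard build_ext pattern. A condensed, self-contained sketch follows; the setup-script name and build directory here are placeholders, not the POD's actual paths.

import subprocess
import sys

try:
    build = subprocess.run(
        [sys.executable, "setup_cython.py",        # hypothetical setup script
         "build_ext", "--build-lib=."],            # put the .so next to the sources
        check=True, capture_output=True, text=True)
    print(">>>>>>>Successfully compiled cython file")
except subprocess.CalledProcessError as err:
    print("PODError > ", err.output)
    print("PODError > ", err.stderr)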
@@ -8,7 +8,7 @@ AUTHORS: Fiaz Ahmed
-'''
+"""

import os
import glob
@@ -25,327 +25,320 @@ from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d

+
# ======================================================================
# precipbuoy_binThetae
-# takes arguments and bins by subsat+ cape & bint
+# takes arguments and bins by subsat+ cape & bint
+

-@jit(nopython=True)
-def precipbuoy_binThetae(lon_idx, REGION, PRECIP_THRESHOLD, NUMBER_CAPE_BIN, NUMBER_SUBSAT_BIN,
-NUMBER_BL_BIN, CAPE, SUBSAT, BL, RAIN, p0, p1, p2, pe, q0, q1, q2, qe):
-
-
+@jit(nopython=True)
+def precipbuoy_binThetae(lon_idx, REGION, PRECIP_THRESHOLD, NUMBER_CAPE_BIN, NUMBER_SUBSAT_BIN,
+                         NUMBER_BL_BIN, CAPE, SUBSAT, BL, RAIN, p0, p1, p2, pe, q0, q1, q2, qe):
    for lat_idx in np.arange(SUBSAT.shape[1]):
-        subsat_idx=SUBSAT[:,lat_idx,lon_idx]
-        cape_idx=CAPE[:,lat_idx,lon_idx]
-        bl_idx=BL[:,lat_idx,lon_idx]
-        rain=RAIN[:,lat_idx,lon_idx]
-        reg=REGION[lon_idx,lat_idx]
-
-        if reg>0:
+        subsat_idx = SUBSAT[:, lat_idx, lon_idx]
+        cape_idx = CAPE[:, lat_idx, lon_idx]
+        bl_idx = BL[:, lat_idx, lon_idx]
+        rain = RAIN[:, lat_idx, lon_idx]
+        reg = REGION[lon_idx, lat_idx]
+
+        if reg > 0:
            for time_idx in np.arange(SUBSAT.shape[0]):
-                if (cape_idx[time_idx]<NUMBER_CAPE_BIN and cape_idx[time_idx]>=0
-                and subsat_idx[time_idx]<NUMBER_SUBSAT_BIN and subsat_idx[time_idx]>=0
-                and np.isfinite(rain[time_idx])):
-                    p0[subsat_idx[time_idx],cape_idx[time_idx]]+=1
-                    p1[subsat_idx[time_idx],cape_idx[time_idx]]+=rain[time_idx]
-                    p2[subsat_idx[time_idx],cape_idx[time_idx]]+=rain[time_idx]**2
-
-                    if (rain[time_idx]>PRECIP_THRESHOLD):
-                        pe[subsat_idx[time_idx],cape_idx[time_idx]]+=1
-
-                if (bl_idx[time_idx]<NUMBER_BL_BIN and bl_idx[time_idx]>=0
-                and np.isfinite(rain[time_idx])):
-                    q0[bl_idx[time_idx]]+=1
-                    q1[bl_idx[time_idx]]+=rain[time_idx]
-                    q2[bl_idx[time_idx]]+=rain[time_idx]**2
-                    if (rain[time_idx]>PRECIP_THRESHOLD):
-                        qe[bl_idx[time_idx]]+=1
-
+                if cape_idx[time_idx] < NUMBER_CAPE_BIN and cape_idx[time_idx] >= 0 \
+                        and subsat_idx[time_idx] < NUMBER_SUBSAT_BIN and subsat_idx[time_idx] >= 0 \
+                        and np.isfinite(rain[time_idx]):
+                    p0[subsat_idx[time_idx], cape_idx[time_idx]] += 1
+                    p1[subsat_idx[time_idx], cape_idx[time_idx]] += rain[time_idx]
+                    p2[subsat_idx[time_idx], cape_idx[time_idx]] += rain[time_idx] ** 2
+
+                    if rain[time_idx] > PRECIP_THRESHOLD:
+                        pe[subsat_idx[time_idx], cape_idx[time_idx]] += 1
+
+                if bl_idx[time_idx] < NUMBER_BL_BIN and bl_idx[time_idx] >= 0 and np.isfinite(rain[time_idx]):
+                    q0[bl_idx[time_idx]] += 1
+                    q1[bl_idx[time_idx]] += rain[time_idx]
+                    q2[bl_idx[time_idx]] += rain[time_idx] ** 2
+                    if rain[time_idx] > PRECIP_THRESHOLD:
+                        qe[bl_idx[time_idx]] += 1
+
+
class precipbuoy:
+    preprocessed: bool = False
+    binned: bool = False
+    DATE_FORMAT: str
+    NEW_VARS: dict

    def __init__(self):
-        ### read in the primary input variable paths
-
-        ### flag to check if a pre-processed file exists
+        # read in the primary input variable paths
+        # flag to check if a pre-processed file exists
        if glob.glob(os.environ["temp_file"]):
-            self.preprocessed=True
+            self.preprocessed = True
        else:
-            self.preprocessed=False
-
-        ### flag to check if binned output exists ###
+            self.preprocessed = False
+
+        # flag to check if binned output exists ###
        if glob.glob(os.environ["binned_output"]):
-            self.binned=True
+            self.binned = True
        else:
-            self.binned=False
-
-        ### set time and latitudinal slices here ###
-        ### the analysis will only occur over this subset ###
-        strt_dt=dt.datetime.strptime(str(os.environ['FIRSTYR'])+'010100',"%Y%m%d%H")
-        end_dt=dt.datetime.strptime(str(os.environ['LASTYR'])+'123123',"%Y%m%d%H")
-
-
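To make the binning kernel above easier to digest, here is a stripped-down 1D version of the same accumulation pattern; the bin indices are assumed precomputed, and the POD version adds the 2D (subsat, cape) bins and a region mask on top of this.

import numpy as np
from numba import jit

@jit(nopython=True)
def bin_rain_1d(bl_idx, rain, nbins, thresh, q0, q1, q2, qe):
    # accumulate count, sum, sum of squares and precipitating count per bin
    for t in range(rain.size):
        i = bl_idx[t]
        if i >= 0 and i < nbins and np.isfinite(rain[t]):
            q0[i] += 1
            q1[i] += rain[t]
            q2[i] += rain[t] ** 2
            if rain[t] > thresh:
                qe[i] += 1

The conditional mean precipitation per bin is then q1/q0, and q2 supports a variance estimate q2/q0 - (q1/q0)**2.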
self.time_slice=slice(strt_dt, end_dt) ## set time slice - self.lat_slice=slice(-20,20) ## set latitudinal slice - - ### Ensure that start and end dates span more than 1 day. - if (self.time_slice.stop-self.time_slice.start).days<1: + self.binned = False + + # set time and latitudinal slices here ### + # the analysis will only occur over this subset ### + strt_dt = dt.datetime.strptime(str(os.environ['startdate']) + '010100', "%Y%m%d%H") + end_dt = dt.datetime.strptime(str(os.environ['enddate']) + '123123', "%Y%m%d%H") + + self.time_slice = slice(strt_dt, end_dt) # set time slice + self.lat_slice = slice(-20, 20) # set latitudinal slice + + # Ensure that start and end dates span more than 1 day. + if (self.time_slice.stop - self.time_slice.start).days < 1: exit('Please set time range greater than 1 day. Exiting now') - - ### Format for datetime conversion - self.DATE_FORMAT='%Y%m%d' - - ### rename dimensions to internal names for ease of use - self.NEW_VARS={'time':'time','lev':'lev','lat':'lat','lon':'lon'} + # Format for datetime conversion + self.DATE_FORMAT = '%Y%m%d' + # rename dimensions to internal names for ease of use + self.NEW_VARS = {'time': 'time', 'lev': 'lev', 'lat': 'lat', 'lon': 'lon'} - - # # Function preprocess takes in 3D tropospheric temperature and specific humidity fields on model levels, # # and calculates: thetae_LFT, thetae_sat_LFT & thetae_BL. - + def preprocess(self): - ### LOAD temp. and q datasets ### - ta_ds=xr.open_mfdataset(os.environ['ta_file']) - hus_ds=xr.open_mfdataset(os.environ['hus_file']) - - + # LOAD temp. and q datasets ### + ta_ds = xr.open_mfdataset(os.environ['ta_file']) + hus_ds = xr.open_mfdataset(os.environ['hus_file']) + print("....SLICING DATA") - ta_ds_subset=self._slice_data(ta_ds) - hus_ds_subset=self._slice_data(hus_ds) - - - ### check to ensure that time subsets are non-zero ### - assert ta_ds_subset.time.size>0 , 'specified time range is zero!!' - - ### Load arrays into memory ### - - lat=ta_ds_subset['lat'] - lon=ta_ds_subset['lon'] - ta=ta_ds_subset[os.environ['ta_var']] - hus=hus_ds_subset[os.environ['qa_var']] - lev=ta_ds_subset['lev'] - - - ### Is surface pressure is available, extract it - ### if not set ps_ds to None + ta_ds_subset = self._slice_data(ta_ds) + hus_ds_subset = self._slice_data(hus_ds) + + # check to ensure that time subsets are non-zero ### + assert ta_ds_subset.time.size > 0, 'specified time range is zero!!' 
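A compact sketch of the subsetting that __init__ and _slice_data set up above; the year strings and file pattern are made up, and only the slicing logic follows the POD.

import datetime as dt
import xarray as xr

strt_dt = dt.datetime.strptime("2010" + "010100", "%Y%m%d%H")   # startdate, Jan 1 00Z
end_dt = dt.datetime.strptime("2012" + "123123", "%Y%m%d%H")    # enddate, Dec 31 23Z
assert (end_dt - strt_dt).days >= 1, "time range must span more than a day"

ds = xr.open_mfdataset("model.ta.1hr.*.nc")                     # hypothetical files
ds_subset = ds.sel(time=slice(strt_dt, end_dt), lat=slice(-20, 20))   # tropical belt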
+
+        # Load arrays into memory ###
+
+        ta = ta_ds_subset[os.environ['ta_var']]
+        hus = hus_ds_subset[os.environ['qa_var']]
+        lev = ta_ds_subset['lev']
+
+        # If surface pressure is available, extract it
+        # if not, set ps_ds to None
        if os.environ['ps_file']:
-            ps_ds=xr.open_mfdataset(os.environ['ps_file'])
-            ps_ds_subset=self._slice_data(ps_ds,dimsize=3)
+            ps_ds = xr.open_mfdataset(os.environ['ps_file'])
+            ps_ds_subset = self._slice_data(ps_ds, dimsize=3)
        else:
-            ps_ds_subset=None
-
-        ### extract pressure levels
-        pres,ps=self._return_pres_levels(lev,ta,ps_ds_subset)
-
-        assert(ta['time'].size==hus['time'].size)
-
-        ### setting parameters for buoyancy calculations
-
-        ## setting the pressure level at the top of a nominal boundary layer
-        pbl_top=ps-100e2 ## The BL is 100 mb thick ##
-        pbl_top=np.float_(pbl_top.values.flatten()) ### overwriting pbl top xarray with numpy array
-
-        ## setting the pressure level at the top of a nominal lower free troposphere (LFT)
-        low_top=np.zeros_like(ps)
-        low_top[:]=500e2 # the LFT top is fixed at 500 mb
-        low_top=np.float_(low_top.flatten())
-
-        ### LOAD data arrays into memory###
+            ps_ds_subset = None
+
+        # extract pressure levels
+        pres, ps = self._return_pres_levels(lev, ta, ps_ds_subset)
+
+        assert (ta['time'].size == hus['time'].size)
+
+        # setting parameters for buoyancy calculations
+
+        # setting the pressure level at the top of a nominal boundary layer
+        pbl_top = ps - 100e2  # The BL is 100 mb thick ##
+        pbl_top = np.float_(pbl_top.values.flatten())  # overwriting pbl top xarray with numpy array
+
+        # setting the pressure level at the top of a nominal lower free troposphere (LFT)
+        low_top = np.zeros_like(ps)
+        low_top[:] = 500e2  # the LFT top is fixed at 500 mb
+        low_top = np.float_(low_top.flatten())
+
+        # LOAD data arrays into memory###
        print('...LOADING ARRAYS INTO MEMORY')
-        ta=ta.transpose('lev','time','lat','lon')
-        hus=hus.transpose('lev','time','lat','lon')
-        pres=pres.transpose('lev','time','lat','lon')
-        ps=ps.transpose('time','lat','lon')
+        ta = ta.transpose('lev', 'time', 'lat', 'lon')
+        hus = hus.transpose('lev', 'time', 'lat', 'lon')
+        pres = pres.transpose('lev', 'time', 'lat', 'lon')
+        ps = ps.transpose('time', 'lat', 'lon')

-        pres=pres.values
-        ta=np.asarray(ta.values,dtype='float')
-        hus=np.asarray(hus.values,dtype='float')
+        pres = pres.values
+        ta = np.asarray(ta.values, dtype='float')
+        hus = np.asarray(hus.values, dtype='float')
        print('...DONE LOADING')

        ta_ds.close()
        hus_ds.close()

-        ### Check if pressure array is descending
-        ### since this is an implicit assumption
-        if (np.all(np.diff(pres,axis=0)<0)):
+        # Check if pressure array is descending
+        # since this is an implicit assumption
+
+        if np.all(np.diff(pres, axis=0) < 0):
            print('     pressure levels strictly decreasing')
-        elif (np.all(np.diff(pres,axis=0)>0)):
+        elif np.all(np.diff(pres, axis=0) > 0):
            print('     pressure levels strictly increasing')
            print('     reversing the pressure dimension')
-            pres=pres[::-1,:,:,:]
-            ta=ta[::-1,:,:,:]
-            hus=hus[::-1,:,:,:]
+            pres = pres[::-1, :, :, :]
+            ta = ta[::-1, :, :, :]
+            hus = hus[::-1, :, :, :]
        else:
            exit('......Check pressure level ordering.
Exiting now..') - ### Reshape arrays to 2D ### - + # Reshape arrays to 2D ### + print('...COMPUTING THETAE VARIABLES') - lev=pres.reshape(*lev.shape[:1],-1) - ta_flat=ta.reshape(*ta.shape[:1],-1) - hus_flat=hus.reshape(*hus.shape[:1],-1) + lev = pres.reshape(*lev.shape[:1], -1) + ta_flat = ta.reshape(*ta.shape[:1], -1) + hus_flat = hus.reshape(*hus.shape[:1], -1) - pbl_ind=np.zeros(pbl_top.size,dtype=np.int64) - low_ind=np.zeros(low_top.size,dtype=np.int64) + pbl_ind = np.zeros(pbl_top.size, dtype=np.int64) + low_ind = np.zeros(low_top.size, dtype=np.int64) - ### Find the closest pressure level to pbl_top and low_top - ### using a cython routine 'find_closest_index_2D' - find_closest_index_2D(pbl_top,lev,pbl_ind) - find_closest_index_2D(low_top,lev,low_ind) + # Find the closest pressure level to pbl_top and low_top + # using a cython routine 'find_closest_index_2D' + find_closest_index_2D(pbl_top, lev, pbl_ind) + find_closest_index_2D(low_top, lev, low_ind) - ### Declare empty arrays to hold thetae variables - thetae_bl=np.zeros_like(pbl_top) - thetae_lt=np.zeros_like(pbl_top) - thetae_sat_lt=np.zeros_like(pbl_top) + # Declare empty arrays to hold thetae variables + thetae_bl = np.zeros_like(pbl_top) + thetae_lt = np.zeros_like(pbl_top) + thetae_sat_lt = np.zeros_like(pbl_top) - ### the fractional weighting of the boundary layer in - ### buoyancy computation - wb=np.zeros_like(pbl_top) + # the fractional weighting of the boundary layer in + # buoyancy computation + wb = np.zeros_like(pbl_top) - ### Use trapezoidal rule for approximating the vertical integral ### - ### vert. integ.=(b-a)*(f(a)+f(b))/2 - ### using a cython routine 'compute_layer_thetae' + # Use trapezoidal rule for approximating the vertical integral ### + # vert. integ.=(b-a)*(f(a)+f(b))/2 + # using a cython routine 'compute_layer_thetae' compute_layer_thetae(ta_flat, hus_flat, lev, pbl_ind, low_ind, thetae_bl, thetae_lt, thetae_sat_lt, wb) - ### if thetae_bl is zero set it to nan - ### masking is an option. - thetae_bl[thetae_bl==0]=np.nan - thetae_lt[thetae_lt==0]=np.nan - thetae_sat_lt[thetae_sat_lt==0]=np.nan - - ### Unflatten the space dimension to lat,lon ### - thetae_bl=thetae_bl.reshape(ps.shape) - thetae_lt=thetae_lt.reshape(ps.shape) - thetae_sat_lt=thetae_sat_lt.reshape(ps.shape) - - print('.....'+os.environ['ta_file']+" & "+os.environ['hus_file']+" pre-processed!") - - ### SAVING INTERMEDIATE FILE TO DISK ### - - data_set=xr.Dataset(data_vars={"thetae_bl":(ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, thetae_bl), - "thetae_lt":(ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, thetae_lt), - "thetae_sat_lt":(ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, thetae_sat_lt), - "ps":(ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, ps)}, - coords=ta_ds_subset[os.environ['ta_var']].isel(lev=0).drop('lev').coords) - data_set.thetae_bl.attrs['long_name']="theta_e averaged in the BL (100 hPa above surface)" - data_set.thetae_lt.attrs['long_name']="theta_e averaged in the LFT (100 hPa above surface to 500 hPa)" - data_set.thetae_sat_lt.attrs['long_name']="theta_e_sat averaged in the LFT (100 hPa above surface to 500 hPa)" - data_set.ps.attrs['long_name']="surface pressure" - - data_set.thetae_bl.attrs['units']="K" - data_set.thetae_lt.attrs['units']="K" - data_set.thetae_sat_lt.attrs['units']="K" - data_set.ps.attrs['units']='Pa' - - data_set.attrs['source']="Precipiation Buoyancy Diagnostics \ + # if thetae_bl is zero set it to nan + # masking is an option. 
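The cython routine invoked above applies the quadrature rule quoted in the comment, vert. integ. = (b-a)*(f(a)+f(b))/2, segment by segment. A pure-numpy, single-column sketch of that layer average, with invented values for illustration:

import numpy as np

def layer_mean(f, p, p_bot, p_top):
    # p decreasing from the surface side; average f between p_bot and p_top
    mask = (p <= p_bot) & (p >= p_top)
    pp, ff = p[mask], f[mask]
    integral = np.sum(0.5 * (ff[1:] + ff[:-1]) * (pp[:-1] - pp[1:]))
    return integral / (pp[0] - pp[-1])

p = np.array([1000., 900., 800., 700., 600., 500.]) * 100    # Pa
thetae = np.array([345., 342., 340., 338., 336., 334.])      # K
print(layer_mean(thetae, p, 900e2, 500e2))                   # LFT-style average, 338.0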
+        thetae_bl[thetae_bl == 0] = np.nan
+        thetae_lt[thetae_lt == 0] = np.nan
+        thetae_sat_lt[thetae_sat_lt == 0] = np.nan
+
+        # Unflatten the space dimension to lat,lon ###
+        thetae_bl = thetae_bl.reshape(ps.shape)
+        thetae_lt = thetae_lt.reshape(ps.shape)
+        thetae_sat_lt = thetae_sat_lt.reshape(ps.shape)
+
+        print('.....' + os.environ['ta_file'] + " & " + os.environ['hus_file'] + " pre-processed!")
+
+        # SAVING INTERMEDIATE FILE TO DISK ###
+
+        data_set = xr.Dataset(data_vars={"thetae_bl": (ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, thetae_bl),
+                                         "thetae_lt": (ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, thetae_lt),
+                                         "thetae_sat_lt": (ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims,
+                                                           thetae_sat_lt),
+                                         "ps": (ta_ds_subset[os.environ['ta_var']].isel(lev=0).dims, ps)},
+                              coords=ta_ds_subset[os.environ['ta_var']].isel(lev=0).drop('lev').coords)
+        data_set.thetae_bl.attrs['long_name'] = "theta_e averaged in the BL (100 hPa above surface)"
+        data_set.thetae_lt.attrs['long_name'] = "theta_e averaged in the LFT (100 hPa above surface to 500 hPa)"
+        data_set.thetae_sat_lt.attrs['long_name'] = "theta_e_sat averaged in the LFT (100 hPa above surface to 500 hPa)"
+        data_set.ps.attrs['long_name'] = "surface pressure"
+
+        data_set.thetae_bl.attrs['units'] = "K"
+        data_set.thetae_lt.attrs['units'] = "K"
+        data_set.thetae_sat_lt.attrs['units'] = "K"
+        data_set.ps.attrs['units'] = 'Pa'
+
+        data_set.attrs['source'] = "Precipitation Buoyancy Diagnostics \
-                                    as part of the NOAA Model Diagnostic Task Force (MDTF)"
-        data_set.to_netcdf(os.environ["temp_file"],mode='w')
-        print('...'+os.environ["temp_file"]+" saved!")
-
-        ### set preprocessed flag to True
-        if glob.glob(os.environ["temp_file"]):
-            self.preprocessed=True
-
-
+        data_set.to_netcdf(os.environ["temp_file"], mode='w')
+        print('...'
+ os.environ["temp_file"] + " saved!") + # set preprocessed flag to True + if glob.glob(os.environ["temp_file"]): + self.preprocessed = True - ### function to fix datetime formats - def _fix_datetime(self, ds,date_format=None): - try: - if ds.indexes['time'].dtype=='float64' or ds.indexes['time'].dtype=='int64': - ds['time']=[dt.datetime.strptime(str(int(i.values)),date_format) for i in ds.time] + # function to fix datetime formats + def _fix_datetime(self, ds, date_format=None): + try: + if ds.indexes['time'].dtype == 'float64' or ds.indexes['time'].dtype == 'int64': + ds['time'] = [dt.datetime.strptime(str(int(i.values)), date_format) for i in ds.time] else: datetimeindex = ds.indexes['time'].to_datetimeindex() ds['time'] = datetimeindex - except: + except Exception as exc: + print(exc) pass - - def _slice_data(self,ds,dimsize=4): - ''' + + def _slice_data(self, ds, dimsize=4): + """ This function can open file, fix co-ordinate names, and slice data for each variable - ''' - - LAT_VAR_NEW=self.NEW_VARS['lat'] - LON_VAR_NEW=self.NEW_VARS['lon'] - TIME_VAR_NEW=self.NEW_VARS['time'] - LEV_VAR_NEW=self.NEW_VARS['lev'] - - if dimsize==4: - ds=ds.rename({os.environ['time_coord']:TIME_VAR_NEW,os.environ['lat_coord']:LAT_VAR_NEW,os.environ['lon_coord']:LON_VAR_NEW, - os.environ['lev_coord']:LEV_VAR_NEW}) - - elif dimsize==3: - ds=ds.rename({os.environ['time_coord']:TIME_VAR_NEW,os.environ['lat_coord']:LAT_VAR_NEW,os.environ['lon_coord']:LON_VAR_NEW}) - - - ### Ensure that times are in datetime format ### + """ + + LAT_VAR_NEW = self.NEW_VARS['lat'] + LON_VAR_NEW = self.NEW_VARS['lon'] + TIME_VAR_NEW = self.NEW_VARS['time'] + LEV_VAR_NEW = self.NEW_VARS['lev'] + + if dimsize == 4: + ds = ds.rename({os.environ['time_coord']: TIME_VAR_NEW, + os.environ['lat_coord']: LAT_VAR_NEW, + os.environ['lon_coord']: LON_VAR_NEW, + os.environ['lev_coord']: LEV_VAR_NEW} + ) + + elif dimsize == 3: + ds = ds.rename({os.environ['time_coord']: TIME_VAR_NEW, + os.environ['lat_coord']: LAT_VAR_NEW, + os.environ['lon_coord']: LON_VAR_NEW} + ) + + # Ensure that times are in datetime format ### self._fix_datetime(ds, self.DATE_FORMAT) - ### select subset ### - ds_subset=ds.sel(time=self.time_slice,lat=self.lat_slice) + # select subset ### + ds_subset = ds.sel(time=self.time_slice, lat=self.lat_slice) return ds_subset - def _return_pres_levels(self, lev, da, ps_ds): - - ''' + + """ Function to set pressure levels and surface pressure depending on whether incoming levels are on pressure or sigma co-ordinates. 
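For reference, the calendar handling in _fix_datetime above reduces to the following sketch; the dataset and format string are hypothetical, and the try/except mirrors the POD's intent of letting an already-clean time axis pass through untouched.

import datetime as dt

def fix_datetime(ds, date_format="%Y%m%d"):
    # integer/float-coded times: parse with the known format string
    if ds.indexes['time'].dtype in ('int64', 'float64'):
        ds['time'] = [dt.datetime.strptime(str(int(t.values)), date_format)
                      for t in ds.time]
    else:
        try:
            # e.g. cftime objects from a noleap calendar
            ds['time'] = ds.indexes['time'].to_datetimeindex()
        except AttributeError:
            pass   # already a plain DatetimeIndex
    return ds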
- ''' - - ### Check if pressure or sigma co-ordinates ### - - if os.environ['VERT_TYPE']=='pres': - - pres=lev - ### Check if units are in hPa (or mb) - ### Convert units to Pa if required ### - if(str(pres.units).lower() in [i.lower() for i in ['hPa','mb']]): - pres=pres*100 - - ### Convert data type - pres=pres.astype('float') - - - ## broadcast pressure to a 4D array to mimic sigma levels - ## this step is computationally inefficient, but helps retain - ## portability between pressure and sigma level handling - - pres,dummy=xr.broadcast(pres,da.isel(lev=0,drop=True)) - - - ### Read surface pressure values if available + """ + + # Check if pressure or sigma co-ordinates ### + + if os.environ['VERT_TYPE'] == 'pres': + + pres = lev + # Check if units are in hPa (or mb) + # Convert units to Pa if required ### + if str(pres.units).lower() in [i.lower() for i in ['hPa', 'mb']]: + pres = pres * 100 + + # Convert data type + pres = pres.astype('float') + + # broadcast pressure to a 4D array to mimic sigma levels + # this step is computationally inefficient, but helps retain + # portability between pressure and sigma level handling + + pres, dummy = xr.broadcast(pres, da.isel(lev=0, drop=True)) + + # Read surface pressure values if available if ps_ds: - ps=ps_ds[os.environ['ps_var']] - if(ps.units=='hPa'): - ps=ps*100 - ### if unavailable, set surface pressure to maximum pressure level - else: - ps=pres.sel(lev=lev.max().values) - - - elif os.environ['VERT_TYPE']=='sigma': - ### currently written so that coefficients a and b are - ### stored in the surface pressure file - a=ps_ds[os.environ['a_var']] - b=ps_ds[os.environ['b_var']] - ps=ps_ds[os.environ['ps_var']] - - ### Create pressure data ### - pres=b*ps+a + ps = ps_ds[os.environ['ps_var']] + if ps.units == 'hPa': + ps = ps * 100 + # if unavailable, set surface pressure to maximum pressure level + else: + ps = pres.sel(lev=lev.max().values) + + elif os.environ['VERT_TYPE'] == 'sigma': + # currently written so that coefficients a and b are + # stored in the surface pressure file + a = ps_ds[os.environ['a_var']] + b = ps_ds[os.environ['b_var']] + ps = ps_ds[os.environ['ps_var']] + + # Create pressure data ### + pres = b * ps + a return pres, ps - - - # ====================================================================== + + # ====================================================================== # generate_region_mask: function provided by Yi-Hung Kuo # generates a map of integer values that correspond to regions using # the file region_0.25x0.25_costal2.5degExcluded.mat @@ -357,791 +350,761 @@ def _return_pres_levels(self, lev, da, ps_ds): # def _generate_region_mask(self, region_mask_filename, ds): - + print("...generating region mask..."), # Load & Pre-process Region Mask - matfile=scipy.io.loadmat(region_mask_filename) - lat_m=matfile["lat"] - lon_m=matfile["lon"] # 0.125~359.875 deg - region=matfile["region"] - lon_m=np.append(lon_m,np.reshape(lon_m[0,:],(-1,1))+360,0) - lon_m=np.append(np.reshape(lon_m[-2,:],(-1,1))-360,lon_m,0) - region=np.append(region,np.reshape(region[0,:],(-1,lat_m.size)),0) - region=np.append(np.reshape(region[-2,:],(-1,lat_m.size)),region,0) + matfile = scipy.io.loadmat(region_mask_filename) + lat_m = matfile["lat"] + lon_m = matfile["lon"] # 0.125~359.875 deg + region = matfile["region"] + lon_m = np.append(lon_m, np.reshape(lon_m[0, :], (-1, 1)) + 360, 0) + lon_m = np.append(np.reshape(lon_m[-2, :], (-1, 1)) - 360, lon_m, 0) + region = np.append(region, np.reshape(region[0, :], (-1, lat_m.size)), 0) + region = 
np.append(np.reshape(region[-2, :], (-1, lat_m.size)), region, 0) - LAT,LON=np.meshgrid(lat_m,lon_m,sparse=False,indexing="xy") - LAT=np.reshape(LAT,(-1,1)) - LON=np.reshape(LON,(-1,1)) - REGION=np.reshape(region,(-1,1)) + LAT, LON = np.meshgrid(lat_m, lon_m, sparse=False, indexing="xy") + LAT = np.reshape(LAT, (-1, 1)) + LON = np.reshape(LON, (-1, 1)) + REGION = np.reshape(region, (-1, 1)) - LATLON=np.squeeze(np.array((LAT,LON))) - LATLON=LATLON.transpose() + LATLON = np.squeeze(np.array((LAT, LON))) + LATLON = LATLON.transpose() - regMaskInterpolator=NearestNDInterpolator(LATLON,REGION) + regMaskInterpolator = NearestNDInterpolator(LATLON, REGION) # Interpolate Region Mask onto Model Grid using Nearest Grid Value -# pr_netcdf=Dataset(model_netcdf_filename,"r") - lon=ds.lon.values - lat=ds.sel(lat=self.lat_slice).lat.values - if lon[lon<0.0].size>0: - lon[lon[lon<0.0]]+=360.0 - - LAT,LON=np.meshgrid(lat,lon,sparse=False,indexing="xy") - LAT=np.reshape(LAT,(-1,1)) - LON=np.reshape(LON,(-1,1)) - LATLON=np.squeeze(np.array((LAT,LON))) - LATLON=LATLON.transpose() - REGION=np.zeros(LAT.size) + # pr_netcdf=Dataset(model_netcdf_filename,"r") + lon = ds.lon.values + lat = ds.sel(lat=self.lat_slice).lat.values + if lon[lon < 0.0].size > 0: + lon[lon[lon < 0.0]] += 360.0 + + LAT, LON = np.meshgrid(lat, lon, sparse=False, indexing="xy") + LAT = np.reshape(LAT, (-1, 1)) + LON = np.reshape(LON, (-1, 1)) + LATLON = np.squeeze(np.array((LAT, LON))) + LATLON = LATLON.transpose() + REGION = np.zeros(LAT.size) for latlon_idx in np.arange(REGION.shape[0]): - REGION[latlon_idx]=regMaskInterpolator(LATLON[latlon_idx,:]) - REGION=np.reshape(REGION.astype(int),(-1,lat.size)) - + REGION[latlon_idx] = regMaskInterpolator(LATLON[latlon_idx, :]) + REGION = np.reshape(REGION.astype(int), (-1, lat.size)) + print("...Generated!") return REGION - def bin(self): - - ### Define binning parameters ### - ### Currently set inside the POD; a flexible option would - ### be to read user-defined values. This would be useful, - ### if a model state space is different from usually encountered. - - bl_bin_params={} - bl_bin_params['width']=0.01 - bl_bin_params['max']=1.50 - bl_bin_params['min']=-1.5 - + # Define binning parameters ### + # Currently set inside the POD; a flexible option would + # be to read user-defined values. This would be useful, + # if a model state space is different from usually encountered. + + bl_bin_params = {} + bl_bin_params['width'] = 0.01 + bl_bin_params['max'] = 1.50 + bl_bin_params['min'] = -1.5 + # Bin width and intervals for CAPE and SUBSAT. 
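The regridding above boils down to a nearest-neighbour lookup of integer region labels, which keeps the labels categorical instead of interpolating them. A compact sketch on synthetic grids:

import numpy as np
from scipy.interpolate import NearestNDInterpolator

src_lat = np.arange(-20, 21, 1.0)
src_lon = np.arange(0, 360, 1.0)
LAT, LON = np.meshgrid(src_lat, src_lon, indexing="xy")
labels = np.random.randint(0, 5, LAT.shape)          # fake integer region IDs

interp = NearestNDInterpolator(
    np.column_stack([LAT.ravel(), LON.ravel()]), labels.ravel())

mod_lat = np.arange(-19.5, 20, 2.0)                  # coarser model grid
mod_lon = np.arange(0.5, 360, 2.0)
MLAT, MLON = np.meshgrid(mod_lat, mod_lon, indexing="xy")
region = interp(np.column_stack([MLAT.ravel(), MLON.ravel()]))
region = region.reshape(MLAT.shape).astype(int)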
# In units of K - cape_params_list=[1,20,-40] - subsat_params_list=[1,42,-1] - - cape_bin_params={key: value for key,value in zip(bl_bin_params.keys(),cape_params_list)} - subsat_bin_params={key: value for key,value in zip(bl_bin_params.keys(),subsat_params_list)} - - generate_bin_center = lambda x: np.arange(x['min'],x['max']+x['width'],x['width']) - - cape_bin_center=generate_bin_center(cape_bin_params) - subsat_bin_center=generate_bin_center(subsat_bin_params) - bl_bin_center=generate_bin_center(bl_bin_params) - - NUMBER_CAPE_BIN=cape_bin_center.size - NUMBER_SUBSAT_BIN=subsat_bin_center.size - NUMBER_BL_BIN=bl_bin_center.size - + cape_params_list = [1, 20, -40] + subsat_params_list = [1, 42, -1] + + cape_bin_params = {key: value for key, value in zip(bl_bin_params.keys(), cape_params_list)} + subsat_bin_params = {key: value for key, value in zip(bl_bin_params.keys(), subsat_params_list)} + + generate_bin_center = lambda x: np.arange(x['min'], x['max'] + x['width'], x['width']) + + cape_bin_center = generate_bin_center(cape_bin_params) + subsat_bin_center = generate_bin_center(subsat_bin_params) + bl_bin_center = generate_bin_center(bl_bin_params) + + NUMBER_CAPE_BIN = cape_bin_center.size + NUMBER_SUBSAT_BIN = subsat_bin_center.size + NUMBER_BL_BIN = bl_bin_center.size + # Allocate arrays for 2D binning (CAPE, SUBSAT) - P0=np.zeros((NUMBER_SUBSAT_BIN,NUMBER_CAPE_BIN)) - P1=np.zeros((NUMBER_SUBSAT_BIN,NUMBER_CAPE_BIN)) - P2=np.zeros((NUMBER_SUBSAT_BIN,NUMBER_CAPE_BIN)) - PE=np.zeros((NUMBER_SUBSAT_BIN,NUMBER_CAPE_BIN)) + P0 = np.zeros((NUMBER_SUBSAT_BIN, NUMBER_CAPE_BIN)) + P1 = np.zeros((NUMBER_SUBSAT_BIN, NUMBER_CAPE_BIN)) + P2 = np.zeros((NUMBER_SUBSAT_BIN, NUMBER_CAPE_BIN)) + PE = np.zeros((NUMBER_SUBSAT_BIN, NUMBER_CAPE_BIN)) # Allocate arrays for 1D binning (BL) - Q0=np.zeros((NUMBER_BL_BIN)) - Q1=np.zeros((NUMBER_BL_BIN)) - Q2=np.zeros((NUMBER_BL_BIN)) - QE=np.zeros((NUMBER_BL_BIN)) - - ### Internal constants ### - - ref_thetae=340 ## reference theta_e in K to convert buoy. to temp units - gravity=9.8 ### accl. due to gravity - thresh_pres=700 ## Filter all point below this surface pressure in hPa - - ## Open and slice precip. data ### - print('LOADING thetae and pcp. values') - pr_ds=xr.open_mfdataset(os.environ["pr_file"]) - pr_ds_subset=self._slice_data(pr_ds,dimsize=3) - - thetae_ds=xr.open_mfdataset(os.environ["temp_file"]) - - thetae_bl=thetae_ds.thetae_bl.values - thetae_lt=thetae_ds.thetae_lt.values - thetae_sat_lt=thetae_ds.thetae_sat_lt.values - - ps=thetae_ds.ps.values - ps=ps*1e-2 ## Convert surface pressure to hPa - - pr=pr_ds_subset[os.environ['pr_var']].values*np.float(os.environ['pr_conversion_factor']) + Q0 = np.zeros((NUMBER_BL_BIN)) + Q1 = np.zeros((NUMBER_BL_BIN)) + Q2 = np.zeros((NUMBER_BL_BIN)) + QE = np.zeros((NUMBER_BL_BIN)) + + # Internal constants ### + + ref_thetae = 340 # reference theta_e in K to convert buoy. to temp units + gravity = 9.8 # accl. due to gravity + thresh_pres = 700 # Filter all point below this surface pressure in hPa + + # Open and slice precip. data ### + print('LOADING thetae and pcp. 
values')
+        pr_ds = xr.open_mfdataset(os.environ["pr_file"])
+        pr_ds_subset = self._slice_data(pr_ds, dimsize=3)
+
+        thetae_ds = xr.open_mfdataset(os.environ["temp_file"])
+
+        thetae_bl = thetae_ds.thetae_bl.values
+        thetae_lt = thetae_ds.thetae_lt.values
+        thetae_sat_lt = thetae_ds.thetae_sat_lt.values
+
+        ps = thetae_ds.ps.values
+        ps = ps * 1e-2  # Convert surface pressure to hPa
+
+        pr = pr_ds_subset[os.environ['pr_var']].values * float(os.environ['pr_conversion_factor'])  ###
-        pr[pr<0]=0.0 ##in case model has spurious negative precipitation.
-
-        ### generate a mask for land points ###
-        ### using region_mask function from convective transition statistics POD ###
-
-        REGION=self._generate_region_mask(os.environ["region_mask"],pr_ds_subset)
-
-        ### get parameters of buoyancy computation ###
-        ### see Ahmed et al. 2020, JAS for computation ##
-        delta_pl=ps-100-500
-        delta_pb=100
-        wb=(delta_pb/delta_pl)*np.log((delta_pl+delta_pb)/delta_pb)
-        wl=1-wb
-
-        ### points with surface pressure below threshold are set to nan
-        wb[ps<thresh_pres]=np.nan
-        if subsat_max_pop_ind[0]>subsat_bin_center.size-1:
+            while np.isnan(Z[:subsat_max_pop_ind[0], :]).all():
+                # if the slice along subsat axis is too short,
+                # shift the subsat max to a drier spot by one bin
+                subsat_max_pop_ind[0] = subsat_max_pop_ind[0] + 1
+                if subsat_max_pop_ind[0] > subsat_bin_center.size - 1:
                    print('...could not locate point for gamma Tq computation')
                    break
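As a worked example of the layer weights defined above (Ahmed et al. 2020, JAS): with a 100 hPa deep boundary layer under a 1000 hPa surface and the LFT top at 500 hPa, the weights evaluate as below. How wb and wl subsequently weight the CAPE and SUBSAT contributions is not fully visible in this hunk, so the sketch stops at the weights themselves.

import numpy as np

ps = 1000.0                       # surface pressure, hPa (illustrative)
delta_pb = 100.0                  # boundary-layer depth, hPa
delta_pl = ps - 100.0 - 500.0     # LFT depth, hPa (400 here)

wb = (delta_pb / delta_pl) * np.log((delta_pl + delta_pb) / delta_pb)
wl = 1.0 - wb
print(round(wb, 3), round(wl, 3))   # 0.402 0.598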
- - area_cape=np.nanmean(Z_cape) - area_subsat=np.nanmean(Z_subsat) - area_overlap=np.nanmean(Z_overlap) - darea_cape=abs(area_overlap-area_cape) - darea_subsat=abs(area_overlap-area_subsat) - ratio=darea_cape*dsubsat/(dcape*darea_subsat) - num=darea_cape/dcape - denom=darea_subsat/dsubsat - - return ratio,subsat_max_pop_ind,cape_max_pop_ind,subsat_y0.mean(),cape_x0.mean(),num,denom,hist - - - def _plot_arrow(self,ax,gamma_params, + # Create three copies of the 2D precipitation surface array. + # Divide the precipitation surface into three portions: the CAPE, SUBSAT and + # overlapping portions + # The CAPE portion is for SUBSAT values beyond the SUBSAT index of max counts + # The SUBSAT portion is for CAPE values beyond the CAPE index of max counts + # The overlapping portion contains the overlapping components of the CAPE and SUBSAT arrays. + + Z_subsat = np.copy(Z) + Z_subsat[:] = np.nan + Z_subsat[subsat_max_pop_ind[0] - 1:, cape_max_pop_ind[0]:] = Z[subsat_max_pop_ind[0] - 1:, cape_max_pop_ind[0]:] + + Z_cape = np.copy(Z) + Z_cape[:] = np.nan + Z_cape[:subsat_max_pop_ind[0], :cape_max_pop_ind[0] + 1] = Z[:subsat_max_pop_ind[0], :cape_max_pop_ind[0] + 1] + + Z_overlap = np.copy(Z) + Z_overlap[:] = np.nan + Z_overlap[:subsat_max_pop_ind[0], cape_max_pop_ind[0]:] = Z[:subsat_max_pop_ind[0], cape_max_pop_ind[0]:] + + # Get the average cape and subsat values for each of the three regions + fin0 = (np.where(np.isfinite(Z_overlap))) + fin1 = (np.where(np.isfinite(Z_cape))) + fin2 = (np.where(np.isfinite(Z_subsat))) + + subsat_y0 = subsat_bin_center[fin0[0]] + cape_x0 = cape_bin_center[fin0[1]] + + cape_x1 = cape_bin_center[fin1[1]] + + subsat_y2 = subsat_bin_center[fin2[0]] + + # Get a distance measure between the overlapping region to the cape and subsat regions + + dcape = abs(cape_x0.mean() - cape_x1.mean()) + dsubsat = abs(subsat_y0.mean() - subsat_y2.mean()) + + # Get a distance measure between the overlapping region to the cape and subsat regions + # Compute the average precipitation within the CAPE and SUBSAT regions. 
+ + area_cape = np.nanmean(Z_cape) + area_subsat = np.nanmean(Z_subsat) + area_overlap = np.nanmean(Z_overlap) + darea_cape = abs(area_overlap - area_cape) + darea_subsat = abs(area_overlap - area_subsat) + ratio = darea_cape * dsubsat / (dcape * darea_subsat) + num = darea_cape / dcape + denom = darea_subsat / dsubsat + + return ratio, subsat_max_pop_ind, cape_max_pop_ind, subsat_y0.mean(), cape_x0.mean(), num, denom, hist + + def _plot_arrow(self, ax, gamma_params, arrow_linestyle='-', arrow_color='orange'): - - gamma,x0,y0,xcentroid,ycentroid,num,denom,hist=gamma_params - - GAMMA_C0=40.0 ## internal scaling factor to plot arrow 3D - gamma_mag=np.sqrt(num**(2)+denom**(2))*GAMMA_C0 - - dx=np.cos(np.arctan(gamma))*gamma_mag - dy=gamma*dx - assert dx>0, 'x distance is negative' - - arrow = Arrow3D([xcentroid+dx, xcentroid], - [ycentroid-dy, ycentroid], - [0, 0], mutation_scale=20, - lw=2, arrowstyle="-|>", color=arrow_color, - zorder=100,linestyle=arrow_linestyle) - ax.add_artist(arrow) + gamma, x0, y0, xcentroid, ycentroid, num, denom, hist = gamma_params - def _plot_precip_surface(self, fig, ax, xbin, ybin, Z, Zoffset, gamma_params, - fig_params, xbin_ref=None, ybin_ref=None, Z_ref=None, - plot_ref=False, gamma_params_ref=None, plot_cbar=False, - cbar_coords=[1.01,0.35,1.0,0.05]): + GAMMA_C0 = 40.0 # internal scaling factor to plot arrow 3D + gamma_mag = np.sqrt(num ** 2 + denom ** 2) * GAMMA_C0 + dx = np.cos(np.arctan(gamma)) * gamma_mag + dy = gamma * dx + assert dx > 0, 'x distance is negative' + arrow = Arrow3D([xcentroid + dx, xcentroid], + [ycentroid - dy, ycentroid], + [0, 0], mutation_scale=20, + lw=2, arrowstyle="-|>", color=arrow_color, + zorder=100, linestyle=arrow_linestyle) + ax.add_artist(arrow) + + def _plot_precip_surface(self, fig, ax, xbin, ybin, Z, Zoffset, gamma_params, + fig_params, xbin_ref=None, ybin_ref=None, Z_ref=None, + plot_ref=False, gamma_params_ref=None, plot_cbar=False, + cbar_coords=[1.01, 0.35, 1.0, 0.05]): + X, Y = np.meshgrid(xbin, ybin) + Zcopy = np.copy(Z) + Zcopy[Zcopy < 0.25] = np.nan # remove points below 0.25 mm/hr - - X, Y = np.meshgrid(xbin,ybin) - Zcopy=np.copy(Z) - Zcopy[Zcopy<0.25]=np.nan ### remove points below 0.25 mm/hr + Zcopy1 = np.copy(Z) - Zcopy1=np.copy(Z) + # Plot precip surface ### + # choose boundaries to chop in 3D plot ### + indx1 = np.where(xbin > fig_params['f1'][0][1])[0] + indy1 = np.where(np.isfinite(ybin))[0] - ### Plot precip surface ### - ### choose boundaries to chop in 3D plot ### - indx1=np.where(xbin>fig_params['f1'][0][1])[0] - indy1=np.where(np.isfinite(ybin))[0] + indx = np.where(np.isfinite(xbin))[0] + indy = np.where(ybin < fig_params['f1'][1][0])[0] - indx=np.where(np.isfinite(xbin))[0] - indy=np.where(ybin','BCC-CSM2-MR': '+', 'CESM2': 'o', - 'CMCC-CM2-ESM2': '*', 'CNRM-CM6-1': '<','CNRM-CM6-1-HR': 'D','CanESM5': '>', - 'FGOALS-g3': '+','GFDL-CM4': 'o','GISS-E2-1-G': '*','IPSL-CM5A2-INCA': '<', - 'IPSL-CM6A-LR': 'D', 'KACE-1-0-G': '>', 'MIROC-ES2L': '+','MIROC6': 'o', - 'MPI-ESM1-2-HR': '*', 'MPI-ESM1-2-LR': '<', 'MRI-ESM2-0': 'D','NESM3': '>', - 'NorESM2-LM': '+','NorESM2-MM': 'o','SAM0-UNICON': '*','TaiESM1': '<', - 'obs': 'D','obs_2deg': '>'} - - model_colors_dict={'ACCESS-CM2': (0.2, 0.0, 0.2, 1.0), - 'ACCESS-ESM1-5': (0.3333571428571428, 0.0, 0.3809285714285714, 1.0), - 'AWI-ESM-1-1-LR': (0.49524285714285715, 0.0, 0.5618857142857143, 1.0), - 'BCC-CSM2-MR': (0.4571142857142858, 0.0, 0.6095285714285714, 1.0), - 'CESM2': (0.07618571428571436, 0.0, 0.6571714285714285, 1.0), - 'CMCC-CM2-ESM2': (0.0, 0.0, 
0.7809857142857141, 1.0), - 'CMCC-CM2-SR5': (0.0, 0.133342857142857, 0.8667, 1.0), - 'CNRM-CM6-1': (0.0, 0.4667, 0.8667, 1.0), - 'CNRM-CM6-1-HR': (0.0, 0.5619142857142857, 0.8667, 1.0), - 'CNRM-ESM2-1': (0.0, 0.6285857142857143, 0.7809857142857143, 1.0), - 'CanESM5': (0.0, 0.6667, 0.6476428571428572, 1.0), - 'FGOALS-g3': (0.0, 0.6667, 0.5523571428571429, 1.0), - 'GFDL-CM4': (0.0, 0.6285857142857143, 0.22855714285714296, 1.0), - 'GISS-E2-1-G': (0.0, 0.6380857142857141, 0.0, 1.0), - 'IPSL-CM6A-LR': (0.0, 0.6380857142857141, 0.0, 1.0), - 'IPSL-CM5A2-INCA': (0.0, 0.8285857142857141, 0.0, 1.0), - 'KACE-1-0-G': (0.0, 0.9238285714285713, 0.0, 1.0), - 'MIROC-ES2L': (0.10475714285714258, 1.0, 0.0, 1.0), - 'MIROC6': (0.6285428571428574, 1.0, 0.0, 1.0), - 'MPI-ESM-1-2-HAM': (0.8475857142857139, 0.9618857142857143, 0.0, 1.0), - 'MPI-ESM1-2-HR': (0.9523571428571428, 0.895214285714286, 0.0, 1.0), - 'MPI-ESM1-2-LR': (1.0, 0.8, 0.0, 1.0), - 'MRI-ESM2-0': (1.0, 0.4571428571428574, 0.0, 1.0), - 'NESM3': (0.9809571428571429, 0.0, 0.0, 1.0), - 'NorESM2-LM': (0.8857428571428572, 0.0, 0.0, 1.0), - 'NorESM2-MM': (0.8285857142857144, 0.0, 0.0, 1.0), - 'SAM0-UNICON': (0.8, 0.22857142857142743, 0.22857142857142743, 1.0), - 'TaiESM1': (0.8, 0.2, 0.4, 1.0)} - + + def _polar_plot(self, ax, ds_polar, gamma_params): + + # define markers and colors for CMIP6 models + + model_marker = {'ACCESS-ESM1-5': 'D', 'AWI-ESM-1-1-LR': '>', 'BCC-CSM2-MR': '+', 'CESM2': 'o', + 'CMCC-CM2-ESM2': '*', 'CNRM-CM6-1': '<', 'CNRM-CM6-1-HR': 'D', 'CanESM5': '>', + 'FGOALS-g3': '+', 'GFDL-CM4': 'o', 'GISS-E2-1-G': '*', 'IPSL-CM5A2-INCA': '<', + 'IPSL-CM6A-LR': 'D', 'KACE-1-0-G': '>', 'MIROC-ES2L': '+', 'MIROC6': 'o', + 'MPI-ESM1-2-HR': '*', 'MPI-ESM1-2-LR': '<', 'MRI-ESM2-0': 'D', 'NESM3': '>', + 'NorESM2-LM': '+', 'NorESM2-MM': 'o', 'SAM0-UNICON': '*', 'TaiESM1': '<', + 'obs': 'D', 'obs_2deg': '>'} + + model_colors_dict = {'ACCESS-CM2': (0.2, 0.0, 0.2, 1.0), + 'ACCESS-ESM1-5': (0.3333571428571428, 0.0, 0.3809285714285714, 1.0), + 'AWI-ESM-1-1-LR': (0.49524285714285715, 0.0, 0.5618857142857143, 1.0), + 'BCC-CSM2-MR': (0.4571142857142858, 0.0, 0.6095285714285714, 1.0), + 'CESM2': (0.07618571428571436, 0.0, 0.6571714285714285, 1.0), + 'CMCC-CM2-ESM2': (0.0, 0.0, 0.7809857142857141, 1.0), + 'CMCC-CM2-SR5': (0.0, 0.133342857142857, 0.8667, 1.0), + 'CNRM-CM6-1': (0.0, 0.4667, 0.8667, 1.0), + 'CNRM-CM6-1-HR': (0.0, 0.5619142857142857, 0.8667, 1.0), + 'CNRM-ESM2-1': (0.0, 0.6285857142857143, 0.7809857142857143, 1.0), + 'CanESM5': (0.0, 0.6667, 0.6476428571428572, 1.0), + 'FGOALS-g3': (0.0, 0.6667, 0.5523571428571429, 1.0), + 'GFDL-CM4': (0.0, 0.6285857142857143, 0.22855714285714296, 1.0), + 'GISS-E2-1-G': (0.0, 0.6380857142857141, 0.0, 1.0), + 'IPSL-CM6A-LR': (0.0, 0.6380857142857141, 0.0, 1.0), + 'IPSL-CM5A2-INCA': (0.0, 0.8285857142857141, 0.0, 1.0), + 'KACE-1-0-G': (0.0, 0.9238285714285713, 0.0, 1.0), + 'MIROC-ES2L': (0.10475714285714258, 1.0, 0.0, 1.0), + 'MIROC6': (0.6285428571428574, 1.0, 0.0, 1.0), + 'MPI-ESM-1-2-HAM': (0.8475857142857139, 0.9618857142857143, 0.0, 1.0), + 'MPI-ESM1-2-HR': (0.9523571428571428, 0.895214285714286, 0.0, 1.0), + 'MPI-ESM1-2-LR': (1.0, 0.8, 0.0, 1.0), + 'MRI-ESM2-0': (1.0, 0.4571428571428574, 0.0, 1.0), + 'NESM3': (0.9809571428571429, 0.0, 0.0, 1.0), + 'NorESM2-LM': (0.8857428571428572, 0.0, 0.0, 1.0), + 'NorESM2-MM': (0.8285857142857144, 0.0, 0.0, 1.0), + 'SAM0-UNICON': (0.8, 0.22857142857142743, 0.22857142857142743, 1.0), + 'TaiESM1': (0.8, 0.2, 0.4, 1.0)} + for key in ds_polar.model: if 
str(key.values).split('_')[0] not in ['obs']: - - ### Plot model markers ### - - if key in ['NESM3','NorESM2-MM']: - facecolor_option='None' - alpha_option=0.75 + + # Plot model markers ### + + if key in ['NESM3', 'NorESM2-MM']: + facecolor_option = 'None' + alpha_option = 0.75 else: - facecolor_option=model_colors_dict[str(key.values)] - alpha_option=0.5 - - ds_sel=ds_polar.sel(model=key) - ax.scatter(np.deg2rad(ds_sel.gamma_deg),ds_sel.gamma_mag,label=key.values, - color=model_colors_dict[str(key.values)],marker=model_marker[str(key.values)], - s=75,alpha=alpha_option, facecolor=facecolor_option) - + facecolor_option = model_colors_dict[str(key.values)] + alpha_option = 0.5 + + ds_sel = ds_polar.sel(model=key) + ax.scatter(np.deg2rad(ds_sel.gamma_deg), ds_sel.gamma_mag, label=key.values, + color=model_colors_dict[str(key.values)], marker=model_marker[str(key.values)], + s=75, alpha=alpha_option, facecolor=facecolor_option) + elif key in ['obs']: - - ### Plot obs. ### - - ds_sel=ds_polar.sel(model=key) - ln_obs=ax.scatter(np.deg2rad(ds_sel.gamma_deg),ds_sel.gamma_mag,color='grey', - s=60,alpha=1.0,marker='s',facecolor='grey',edgecolor='black',zorder=9) - - ds_sel=ds_polar.sel(model='obs_2deg') - ln_obs_2deg=ax.scatter(np.deg2rad(ds_sel.gamma_deg),ds_sel.gamma_mag,color='grey', - s=60,alpha=1.0,marker='o',facecolor='grey',edgecolor='black',zorder=9) - - ax.tick_params(which='both',labelsize=13) - - - ### Place error bars on obs. markers and mark uncertainty range### - rad=np.arange(0,25.5,.5) - ax.plot([np.deg2rad(ds_polar.sel(model='obs_2deg_95').gamma_deg)]*rad.size,rad, - c='grey',linestyle='--') - ax.plot([np.deg2rad(ds_polar.sel(model='obs_5').gamma_deg)]*rad.size,rad, - c='grey',linestyle='--') - - ### strange: when the bottom >0; we require a factor of 1.05 in front of the theta argument below. - bootstrap_deg_range=ds_polar.sel(model='obs_95').gamma_deg-ds_polar.sel(model='obs_5').gamma_deg - bootstrap_mag_range=ds_polar.sel(model='obs_95').gamma_mag-ds_polar.sel(model='obs_5').gamma_mag - - ax.bar(np.deg2rad(ds_polar.sel(model='obs_5').gamma_deg)*1.05, bootstrap_mag_range, - width=np.deg2rad(bootstrap_deg_range), + + # Plot obs. ### + + ds_sel = ds_polar.sel(model=key) + ln_obs = ax.scatter(np.deg2rad(ds_sel.gamma_deg), ds_sel.gamma_mag, color='grey', + s=60, alpha=1.0, marker='s', facecolor='grey', edgecolor='black', zorder=9) + + ds_sel = ds_polar.sel(model='obs_2deg') + ln_obs_2deg = ax.scatter(np.deg2rad(ds_sel.gamma_deg), ds_sel.gamma_mag, color='grey', + s=60, alpha=1.0, marker='o', facecolor='grey', edgecolor='black', zorder=9) + + ax.tick_params(which='both', labelsize=13) + + # Place error bars on obs. markers and mark uncertainty range### + rad = np.arange(0, 25.5, .5) + ax.plot([np.deg2rad(ds_polar.sel(model='obs_2deg_95').gamma_deg)] * rad.size, rad, + c='grey', linestyle='--') + ax.plot([np.deg2rad(ds_polar.sel(model='obs_5').gamma_deg)] * rad.size, rad, + c='grey', linestyle='--') + + # strange: when the bottom >0; we require a factor of 1.05 in front of the theta argument below. 
+ bootstrap_deg_range = ds_polar.sel(model='obs_95').gamma_deg - ds_polar.sel(model='obs_5').gamma_deg + bootstrap_mag_range = ds_polar.sel(model='obs_95').gamma_mag - ds_polar.sel(model='obs_5').gamma_mag + + ax.bar(np.deg2rad(ds_polar.sel(model='obs_5').gamma_deg) * 1.05, bootstrap_mag_range, + width=np.deg2rad(bootstrap_deg_range), bottom=ds_polar.sel(model='obs_5').gamma_mag, - color='grey', edgecolor = 'black',alpha=0.5) + color='grey', edgecolor='black', alpha=0.5) - bootstrap_deg_range=ds_polar.sel(model='obs_2deg_95').gamma_deg-ds_polar.sel(model='obs_2deg_5').gamma_deg - bootstrap_mag_range=ds_polar.sel(model='obs_2deg_95').gamma_mag-ds_polar.sel(model='obs_2deg_5').gamma_mag + bootstrap_deg_range = ds_polar.sel(model='obs_2deg_95').gamma_deg - ds_polar.sel(model='obs_2deg_5').gamma_deg + bootstrap_mag_range = ds_polar.sel(model='obs_2deg_95').gamma_mag - ds_polar.sel(model='obs_2deg_5').gamma_mag - ax.bar(np.deg2rad(ds_polar.sel(model='obs_2deg_5').gamma_deg)*1.05, - bootstrap_mag_range, width=np.deg2rad(bootstrap_deg_range), + ax.bar(np.deg2rad(ds_polar.sel(model='obs_2deg_5').gamma_deg) * 1.05, + bootstrap_mag_range, width=np.deg2rad(bootstrap_deg_range), bottom=ds_polar.sel(model='obs_2deg_5').gamma_mag, - color='grey', edgecolor = 'black',alpha=0.5) + color='grey', edgecolor='black', alpha=0.5) - ### Plot candidate model ### - - gamma,num,denom=gamma_params[0], gamma_params[-3], gamma_params[-2] - gamma_mag=np.sqrt(num**(2)+denom**(2)) - ln_cand=ax.scatter(np.arctan(gamma),gamma_mag, - color='black',s=200,alpha=1.0,marker='*',facecolor='black',zorder=9) + # Plot candidate model ### + gamma, num, denom = gamma_params[0], gamma_params[-3], gamma_params[-2] + gamma_mag = np.sqrt(num ** (2) + denom ** (2)) + ln_cand = ax.scatter(np.arctan(gamma), gamma_mag, + color='black', s=200, alpha=1.0, marker='*', facecolor='black', zorder=9) ax.set_thetamin(0) ax.set_thetamax(90) - ax.set_ylim([0,0.6]) - ax.text(0.25,-0.15,"$|\gamma_{cs}|$ (mm $\mathrm{hr}^{-1}\mathrm{ K}^{-1}$)",fontsize=13, + ax.set_ylim([0, 0.6]) + ax.text(0.25, -0.15, "$|\gamma_{cs}|$ (mm $\mathrm{hr}^{-1}\mathrm{ K}^{-1}$)", fontsize=13, transform=ax.transAxes) - leg=ax.legend(fontsize=11,ncol=2,loc=(1.15,0.0)) - leg1=ax.legend((ln_obs,ln_obs_2deg,ln_cand),('ERA5/TRMM3B42 (0.25 deg.)', - 'ERA5/TRMM3B42 (2 deg.)','{CASENAME}'.format(**os.environ)),fontsize=11, - ncol=2,loc=(1.15,0.75)) + leg = ax.legend(fontsize=11, ncol=2, loc=(1.15, 0.0)) + leg1 = ax.legend((ln_obs, ln_obs_2deg, ln_cand), ('ERA5/TRMM3B42 (0.25 deg.)', + 'ERA5/TRMM3B42 (2 deg.)', '{CASENAME}'.format(**os.environ)), + fontsize=11, + ncol=2, loc=(1.15, 0.75)) ax.add_artist(leg) leg.get_frame().set_edgecolor('black') leg1.get_frame().set_edgecolor('black') - - ### Figure caption - caption='This polar plot summarizes information about the thermodynamic sensitivity of convection'\ - ' in \nmultiple models. The angle measures the relative $\mathrm{CAPE}_\mathrm{L}$-$\mathrm{SUBSAT}_\mathrm{L}$'\ - 'sensitivity of model convection;\nthe radius meaures the strength of the precipitation pickup.\n\n'\ - 'The observational baseline is set by the grey square and circle.'\ - ' The grey shaded regions denote\nthe uncertainty range in observations.'\ - ' The black star denotes the model being evaluated. The other\ncolor markers denote different CMIP6 models. See POD documentation for more information.' + + # Figure caption + caption = 'This polar plot summarizes information about the thermodynamic sensitivity of convection' \ + ' in \nmultiple models. 
The angle measures the relative $\mathrm{CAPE}' \
+                  '_\mathrm{L}$-$\mathrm{SUBSAT}_\mathrm{L}$' \
+                  ' sensitivity of model convection;\nthe radius measures the strength of the precipitation pickup.\n\n' \
+                  'The observational baseline is set by the grey square and circle.' \
+                  ' The grey shaded regions denote\nthe uncertainty range in observations.' \
+                  ' The black star denotes the model being evaluated.' \
+                  ' The other\ncolor markers denote different CMIP6 models.' \
+                  ' See POD documentation for more information.'

        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
-        txt=ax.text(0.0, -.65, caption, transform=ax.transAxes,
-        ha='left',fontsize=14,bbox=props)
+        txt = ax.text(0.0, -.65, caption, transform=ax.transAxes,
+                      ha='left', fontsize=14, bbox=props)

-
-### Class for 3D arrows
+# Class for 3D arrows
class Arrow3D(FancyArrowPatch):
    def __init__(self, xs, ys, zs, *args, **kwargs):
-        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
+        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
-        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
+        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
diff --git a/diagnostics/precip_buoy_diag/settings.jsonc b/diagnostics/precip_buoy_diag/settings.jsonc
index 0bfd4bb57..384d8b15f 100644
--- a/diagnostics/precip_buoy_diag/settings.jsonc
+++ b/diagnostics/precip_buoy_diag/settings.jsonc
@@ -17,7 +17,7 @@
  "settings": {
    "driver": "precip_buoy_diag.py",
    "long_name": "Precipitation Buoyancy Diagnostics",
-    "realm": "atmos",
+    "convention" : "cmip",
    "description": "Precipitation Buoyancy Diagnostics",
    // threshold precipitation value to compute precipitating pdfs
    "pod_env_vars": {
      "PRECIP_THRESHOLD": "0.25",
@@ -27,49 +27,63 @@
      "python3": ["numpy", "scipy", "cython", "matplotlib", "xarray", "numba", "networkx"]
    }
  },
-  "dimensions": {
-    "lat": {"standard_name": "latitude"},
-    "lon": {"standard_name": "longitude"},
-    "lev": {
-      "standard_name": "air_pressure",
-      "positive": "down"
-    },
-    "time": {"standard_name": "time"}
-  },
+  "dimensions": {
+    "lat": {
+      "standard_name": "latitude",
+      "units": "degrees_north",
+      "axis": "Y"
+    },
+    "lon": {
+      "standard_name": "longitude",
+      "units": "degrees_east",
+      "axis": "X"
+    },
+    "lev": {
+      "standard_name": "air_pressure",
+      "units": "hPa",
+      "positive": "down",
+      "axis": "Z"
+    },
+    "time": {"standard_name": "time"}
+  },
  "varlist": {
-    "pr": {
-      "standard_name": "precipitation_flux",
-      "freq": "1hr",
-      "requirement": "required",
-      "units": "kg m-2 s-1",
-      "dimensions" : ["time", "lat", "lon"],
-      "multi_file_ok" : true
-    },
-    "ta": {
-      "standard_name": "air_temperature",
-      "freq": "1hr",
-      "requirement": "required",
-      "units": "K",
-      "dimensions" : ["time", "lev", "lat", "lon"],
-      "multi_file_ok" : true
-    },
-    "qa": {
-      "standard_name": "specific_humidity",
-      "freq": "1hr",
-      "requirement": "required",
-      "units": "kg/kg",
-      "dimensions" : ["time", "lev", "lat", "lon"],
-      "multi_file_ok" : true
-    },
-    "ps": {
-      "standard_name": "surface_air_pressure",
-      "freq": "1hr",
-      // this is optional only if height co-ordinates are on pressure levels
-      // if height co-ordinates are sigma, then surface pressure is required.
-      "requirement": "optional",
-      "units": "Pa",
-      "dimensions" : ["time", "lat", "lon"],
-      "multi_file_ok" : true
-    }
-  }
+    "pr": {
+      "standard_name": "precipitation_flux",
+      "freq": "1hr",
+      "realm": "atmos",
+      "requirement": "required",
+      "units": "kg m-2 s-1",
+      "dimensions" : ["time", "lat", "lon"],
+      "multi_file_ok" : true
+    },
+    "ta": {
+      "standard_name": "air_temperature",
+      "realm": "atmos",
+      "freq": "1hr",
+      "requirement": "required",
+      "units": "K",
+      "dimensions" : ["time", "lev", "lat", "lon"],
+      "multi_file_ok" : true
+    },
+    "qa": {
+      "standard_name": "specific_humidity",
+      "realm": "atmos",
+      "freq": "1hr",
+      "requirement": "required",
+      "units": "kg/kg",
+      "dimensions" : ["time", "lev", "lat", "lon"],
+      "multi_file_ok" : true
+    },
+    "ps": {
+      "standard_name": "surface_air_pressure",
+      "realm": "atmos",
+      "freq": "1hr",
+      // this is optional only if height co-ordinates are on pressure levels
+      // if height co-ordinates are sigma, then surface pressure is required.
+      "requirement": "optional",
+      "units": "Pa",
+      "dimensions" : ["time", "lat", "lon"],
+      "multi_file_ok" : true
+    }
+  }
}
diff --git a/diagnostics/precip_diurnal_cycle/pr_diurnal_cycle.ncl b/diagnostics/precip_diurnal_cycle/pr_diurnal_cycle.ncl
index a915e59af..db4128d10 100644
--- a/diagnostics/precip_diurnal_cycle/pr_diurnal_cycle.ncl
+++ b/diagnostics/precip_diurnal_cycle/pr_diurnal_cycle.ncl
@@ -13,14 +13,14 @@ begin
case_desc = getenv("CASENAME")
casename = getenv("CASENAME")
-dir_stub = getenv("WK_DIR")
+dir_stub = getenv("WORK_DIR")
filename = getenv("PR_FILE")
;fincl = getenv("fincl")
ver = getenv("ver")
file_obs_DJF = getenv("OBS_DATA")+"/TRMM_DJF.nc"
file_obs_JJA = getenv("OBS_DATA")+"/TRMM_JJA.nc"
-start_year = stringtodouble(getenv("FIRSTYR"))
-end_year = stringtodouble(getenv("LASTYR"))
+start_year = stringtodouble(getenv("startdate"))
+end_year = stringtodouble(getenv("enddate"))
vars = (/getenv("pr_var"),"TRMM"/)
slatlim = -90.
nlatlim = 90.
diff --git a/diagnostics/precip_diurnal_cycle/pr_diurnal_phase.ncl b/diagnostics/precip_diurnal_cycle/pr_diurnal_phase.ncl
index eda98e3e0..53455f06a 100644
--- a/diagnostics/precip_diurnal_cycle/pr_diurnal_phase.ncl
+++ b/diagnostics/precip_diurnal_cycle/pr_diurnal_phase.ncl
@@ -33,9 +33,9 @@ begin
  phase_only = False ; Set true iff not amplitude color saturation is required.
  ; ps_dir = "/datalocal/haystack/cchen/dcycle/"
-  ps_dir = getenv("WK_DIR")+"/model/PS/"
-  firstyr = stringtoint(getenv("FIRSTYR"))
-  lastyr = stringtoint(getenv("LASTYR"))
+  ps_dir = getenv("WORK_DIR")+"/model/PS/"
+  firstyr = stringtoint(getenv("startdate"))
+  lastyr = stringtoint(getenv("enddate"))
  print(firstyr)
  print(lastyr)
  ;;;;; For model only ;;;;;
diff --git a/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.html b/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.html
index c3a1810fc..237f3148b 100644
--- a/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.html
+++ b/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.html
@@ -8,7 +8,8 @@

Diurnal Cycle of Precipitation

This POD compares a lat-lon model output of total precipitation with observed precipitation derived from the Tropical Rainfall Measuring Mission.
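The composite behind these maps is straightforward; a minimal Python/xarray sketch of the idea follows. This is an illustration only: the POD computes the cycle in NCL (pr_diurnal_cycle.ncl), and the names below are assumed, not part of the POD.

import xarray as xr

def diurnal_composite(pr: xr.DataArray) -> xr.DataArray:
    # Average all 3-hourly samples sharing the same clock hour at each
    # grid point, giving the mean diurnal cycle (dims: hour, lat, lon).
    return pr.groupby("time.hour").mean("time")

# Hypothetical usage:
# pr = xr.open_dataset("pr_3hr.nc")["pr"]
# cycle = diurnal_composite(pr)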

-Full Documentation and Contact Information
+
+ Full Documentation and Contact Information

diff --git a/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.py b/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.py index 8027edb60..b0577e3f4 100644 --- a/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.py +++ b/diagnostics/precip_diurnal_cycle/precip_diurnal_cycle.py @@ -16,15 +16,17 @@ # Community Climate System Model? A Comparison to Reanalysis, # Satellite, and Gridded Station Data. # J. Climate, 27, 5219-5239. -#============================================================ +# ============================================================ import os import subprocess import time -#============================================================ +# ============================================================ # generate_ncl_plots - call a nclPlotFile via subprocess call -#============================================================ +# ============================================================ + + def generate_ncl_plots(nclPlotFile): """generate_plots_call - call a nclPlotFile via subprocess call @@ -40,22 +42,23 @@ def generate_ncl_plots(nclPlotFile): while pipe.poll() is None: time.sleep(0.5) except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) return 0 + if os.path.isfile(os.environ["PR_FILE"]): print("3 hourly precipitation rate file found") print("computing diurnal cycle of precipitation") -#============================================================ +# ============================================================ # Call NCL code here -#============================================================ +# ============================================================ print("--------- Starting DIURNAL CYCLE OF PRECIPITATION generate figures----------------------------") - if ( True ): - generate_ncl_plots(os.environ["POD_HOME"]+"/pr_diurnal_cycle.ncl") + if True: + generate_ncl_plots(os.environ["POD_HOME"] + "/pr_diurnal_cycle.ncl") - generate_ncl_plots(os.environ["POD_HOME"]+"/pr_diurnal_phase.ncl") + generate_ncl_plots(os.environ["POD_HOME"] + "/pr_diurnal_phase.ncl") else: print("WARNING: For testing purposes, skipping diurnal cycle figure generation") diff --git a/diagnostics/precip_diurnal_cycle/settings.jsonc b/diagnostics/precip_diurnal_cycle/settings.jsonc index 266570e4b..d15c3d350 100644 --- a/diagnostics/precip_diurnal_cycle/settings.jsonc +++ b/diagnostics/precip_diurnal_cycle/settings.jsonc @@ -12,7 +12,7 @@ "settings" : { "driver" : "precip_diurnal_cycle.py", "long_name": "Diurnal cycle of precipitation", - "realm" : "atmos", + "convention" : "cmip", "description": "Diurnal Cycle of Precipitation", "runtime_requirements": { "python3": [], @@ -20,13 +20,22 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"} }, "varlist": { "pr": { "standard_name": "precipitation_flux", + "realm": "atmos", "units": "kg m-2 s-1", "dimensions": ["time", "lat", "lon"], "frequency": "3hr" diff --git a/diagnostics/seaice_suite/seaice_suite.html b/diagnostics/seaice_suite/seaice_suite.html index e6dc41444..e55449a60 100644 --- a/diagnostics/seaice_suite/seaice_suite.html +++ b/diagnostics/seaice_suite/seaice_suite.html @@ -9,7 +9,10 @@

Sea Ice Suite

reference.

-All correlations are computed after detrending. For a one-month lag, the map for January shows the correlation of January and February. The map for February shows the correlation of February and March. And so forth. For a one-year lag, the map for January shows the correlation of January and January a year later. And so forth.
+ All correlations are computed after detrending.
+ For a one-month lag, the map for January shows the correlation of January and February.
+ The map for February shows the correlation of February and March. And so forth. For a one-year lag,
+ the map for January shows the correlation of January and January a year later, and so forth.
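The lag convention spelled out above amounts to the following; this is a minimal sketch assuming a 1-D array of detrended monthly anomalies starting in January, not the POD's actual implementation (the POD uses xr.apply_ufunc in seaice_suite_sic_mean_sigma.py, shown later in this diff).

import numpy as np

def lag_corr_for_month(series, month, lag):
    # month: 0 = January; lag in months (1 = one-month lag, 12 = one-year lag).
    x = series[month::12]          # e.g. every January in the record
    y = series[month + lag::12]    # the same series, `lag` months later
    n = min(len(x), len(y))        # drop unpaired years at the end of the record
    return np.corrcoef(x[:n], y[:n])[0, 1]

# month=0, lag=1 correlates January with February;
# month=0, lag=12 correlates January with January a year later.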

The results from this POD are to appear in a paper being prepared
@@ -19,7 +22,7 @@

Sea Ice Suite

-
+ href=model/themean_{{startdate}}-{{enddate}}.png>plot
+ href=model/themean_anomalies_{{startdate}}-{{enddate}}.png>plot
+ href=model/trend_{{startdate}}-{{enddate}}.png>plot
+ href=model/trend_anomalies_{{startdate}}-{{enddate}}.png>plot
+ href=model/detrendedstd_{{startdate}}-{{enddate}}.png>plot
+ href=model/detrendedstd_anomalies_{{startdate}}-{{enddate}}.png>plot
+ href=model/onemolagcorr_{{startdate}}-{{enddate}}.png>plot
+ href=model/onemolagcorr_anomalies_{{startdate}}-{{enddate}}.png>plot
+ href=model/oneyrlagcorr_{{startdate}}-{{enddate}}.png>plot
-
+ href=model/oneyrlagcorr_anomalies_{{startdate}}-{{enddate}}.png>plot
-Sea ice concentration statistics for {{FIRSTYR}}-{{LASTYR}}:
+Sea ice concentration statistics for {{startdate}}-{{enddate}}:
@@ -27,60 +30,60 @@

Sea Ice Suite

Model Mean plot
Model minus Observed Mean plot
Model Trend plot
Model minus Observed Trend plot
Model Standard Deviation after Detrending plot
Model minus Observed Standard Deviation after Detrending plot
Model One-Month Lagged Correlation plot
Model minus Observed One-Month Lagged Correlation plot
Model One-Year Lagged Correlation plot
Model minus Observed One-Year Lagged Correlation plot
+ diff --git a/diagnostics/seaice_suite/seaice_suite_sic_mean_sigma.py b/diagnostics/seaice_suite/seaice_suite_sic_mean_sigma.py index 0204cf9fe..043581b0b 100644 --- a/diagnostics/seaice_suite/seaice_suite_sic_mean_sigma.py +++ b/diagnostics/seaice_suite/seaice_suite_sic_mean_sigma.py @@ -92,26 +92,26 @@ def readindata(file, varname='siconc', firstyr='1979', lastyr='2014'): # 1) Loading model data files: input_file = "{DATADIR}/mon/{CASENAME}.{siconc_var}.mon.nc".format(**os.environ) -obsoutput_dir = "{WK_DIR}/obs/".format(**os.environ) -modoutput_dir = "{WK_DIR}/model/".format(**os.environ) -figures_dir = "{WK_DIR}/model/".format(**os.environ) +obsoutput_dir = "{WORK_DIR}/obs/".format(**os.environ) +modoutput_dir = "{WORK_DIR}/model/".format(**os.environ) +figures_dir = "{WORK_DIR}/model/".format(**os.environ) obs_file = '{OBS_DATA}/HadISST_ice_1979-2016_grid_nh.nc'.format(**os.environ) proc_obs_file = obsoutput_dir+'HadISST_stats_1979-2014.nc'.format(**os.environ) proc_mod_file = modoutput_dir+'seaice_fullfield_stats.nc' modelname = "{CASENAME}".format(**os.environ) siconc_var = "{siconc_var}".format(**os.environ) -firstyr = "{FIRSTYR}".format(**os.environ) -lastyr = "{LASTYR}".format(**os.environ) +firstyr = "{startdate}".format(**os.environ) +lastyr = "{enddate}".format(**os.environ) # obsfirstyr and obslastyr may be changed in the POD settings.jsonc file obsfirstyr = "{obsfirstyr}".format(**os.environ) obslastyr = "{obslastyr}".format(**os.environ) -processmod = not(os.path.isfile(proc_mod_file)) # check if obs proc file exists +processmod = not os.path.isfile(proc_mod_file) # check if obs proc file exists if processmod: field = readindata(input_file, siconc_var, firstyr, lastyr) -processobs = not(os.path.isfile(proc_obs_file)) # check if obs proc file exists +processobs = not(os.path.isfile(proc_obs_file)) # check if obs proc file exists if processobs: # if no proc file then must get obs and process obs = readindata(obs_file, 'sic', firstyr=obsfirstyr, lastyr=obslastyr) @@ -134,10 +134,10 @@ def mainmonthlystats(field=None, firstyr=1979, lastyr=2014): field = xr_reshape(field,'time',['year','month'],[np.arange(firstyr,lastyr+1),np.arange(12)]) print('computing trend, this may take a few minutes') trend, intercept = xr.apply_ufunc(_lrm, field.year, field, - input_core_dims=[['year'], ['year']], - output_core_dims=[[],[]], - output_dtypes=[float, float], - vectorize=True) + input_core_dims=[['year'], ['year']], + output_core_dims=[[],[]], + output_dtypes=[float, float], + vectorize=True) #dask='parallelized') print('computing the rest') @@ -180,8 +180,7 @@ def lagcorr(residuals, lag=1): kwargs={'lag': lag}, output_core_dims=[['month']], output_dtypes=[float], - vectorize=True) - # dask='parallelized') + vectorize=True) # dask='parallelized') rlag.name = 'lagcorr' rlag=rlag.transpose('month',...) 
@@ -196,10 +195,10 @@ def processandsave(field, file_out, firstyr=1979, lastyr=2014): # residuals are detrended field for each month, effectively making them detrended and deseasonalized themean, thestd, trend, detrendedstd, residuals = mainmonthlystats(field, firstyr, lastyr) print('main stats done') - onemolagcorr=lagcorr(residuals,1) - oneyrlagcorr=lagcorr(residuals,12) - onemolagcorr.name='onemolagcorr' - oneyrlagcorr.name='oneyrlagcorr' + onemolagcorr = lagcorr(residuals,1) + oneyrlagcorr = lagcorr(residuals,12) + onemolagcorr.name = 'onemolagcorr' + oneyrlagcorr.name = 'oneyrlagcorr' # 3) Save output data: @@ -232,22 +231,22 @@ def processandsave(field, file_out, firstyr=1979, lastyr=2014): # 4) Read processed data, regrid model to obs grid, plot, saving figures: obsstats = xr.open_dataset(proc_obs_file) -obsstats=obsstats.rename({'latitude':'lat'}) -obsstats=obsstats.rename({'longitude':'lon'}) +obsstats = obsstats.rename({'latitude':'lat'}) +obsstats = obsstats.rename({'longitude':'lon'}) -obsmean=obsstats.themean -obsstd=obsstats.thestd -obstrend=obsstats.trend -obsdetrendedstd=obsstats.detrended_std -obsonemolagcorr=obsstats.onemolagcorr -obsoneyrlagcorr=obsstats.oneyrlagcorr +obsmean = obsstats.themean +obsstd = obsstats.thestd +obstrend = obsstats.trend +obsdetrendedstd = obsstats.detrended_std +obsonemolagcorr = obsstats.onemolagcorr +obsoneyrlagcorr = obsstats.oneyrlagcorr modstats = xr.open_dataset(proc_mod_file) coords = [a for a in modstats.coords] if 'longitude' in coords: - modstats=modstats.rename({'latitude':'lat'}) - modstats=modstats.rename({'longitude':'lon'}) + modstats = modstats.rename({'latitude':'lat'}) + modstats = modstats.rename({'longitude':'lon'}) # regrid model data to obs grid method = 'nearest_s2d' @@ -281,17 +280,17 @@ def monthlyplot(field, obs=None, edgec=None, figfile='./figure.png', edgec = 'yellow' for m, themonth in enumerate(monthabbrev): - ax = plt.subplot(3,4,m+1,projection = ccrs.NorthPolarStereo()) - ax.add_feature(cfeature.LAND,zorder=100,edgecolor='k',facecolor='darkgrey') + ax = plt.subplot(3, 4, m+1, projection=ccrs.NorthPolarStereo()) + ax.add_feature(cfeature.LAND, zorder=100, edgecolor='k', facecolor='darkgrey') ax.set_extent([0.005, 360, 50, 90], crs=ccrs.PlateCarree()) - pl = field.sel(month=m).plot(x='lon', y='lat', vmin=vmin, vmax=vmax, - transform=ccrs.PlateCarree(),cmap=cmap_c,add_colorbar=False) + pl = field.sel(month=m).plot(x='lon', y='lat', vmin=vmin, vmax=vmax, + transform=ccrs.PlateCarree(), cmap=cmap_c, add_colorbar=False) if edge: - obs.sel(month=m).plot.contour(levels=[.15],x='lon', y='lat', linewidths=2, - transform=ccrs.PlateCarree(),colors=[edgec]) + obs.sel(month=m).plot.contour(levels=[.15], x='lon', y='lat', linewidths=2, + transform=ccrs.PlateCarree(), colors=[edgec]) - ax.set_title(themonth,fontsize=14) + ax.set_title(themonth, fontsize=14) fig.suptitle(f"{modelname} {statname} Sea Ice Concentration {firstyr}-{lastyr}", fontsize=18) @@ -300,7 +299,7 @@ def monthlyplot(field, obs=None, edgec=None, figfile='./figure.png', cbar.ax.set_title(unitname,fontsize=14) cbar.ax.tick_params(labelsize=12) plt.subplots_adjust(bottom=0.15) - plt.savefig(figfile, format='png',dpi=300) + plt.savefig(figfile, format='png', dpi=300) plt.show() plt.close() return diff --git a/diagnostics/seaice_suite/settings.jsonc b/diagnostics/seaice_suite/settings.jsonc index a1cae71ac..046bf2861 100644 --- a/diagnostics/seaice_suite/settings.jsonc +++ b/diagnostics/seaice_suite/settings.jsonc @@ -2,7 +2,7 @@ "settings": { "driver": 
"seaice_suite_sic_mean_sigma.py", "long_name": "Arctic Sea Ice Suite Diagnostics", - "realm": "seaIce", + "convention": "cmip", "description": "Sea Ice Concentration Mean, Std", "runtime_requirements": { "python3": [ @@ -24,11 +24,15 @@ }, "dimensions": { "lat": { - "standard_name": "latitude" - }, + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, "lon": { - "standard_name": "longitude" - }, + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": { "standard_name": "time" } @@ -36,6 +40,7 @@ "varlist": { "siconc": { "standard_name": "sea_ice_area_fraction", + "realm": "seaIce", "frequency": "mon", "units": "%", "dimensions": [ diff --git a/diagnostics/stc_annular_modes/make_rean_pod-data.py b/diagnostics/stc_annular_modes/make_rean_pod-data.py index 77b00fe4d..7ca19a182 100644 --- a/diagnostics/stc_annular_modes/make_rean_pod-data.py +++ b/diagnostics/stc_annular_modes/make_rean_pod-data.py @@ -1,13 +1,11 @@ import os - import xarray as xr - from stc_annular_modes_calc import eof_annular_mode, anomalize_geohgt out_dir = os.environ['DATA_OUTPUT_DIR'] -### BEGIN: READ INPUT FIELDS ### +# BEGIN: READ INPUT FIELDS ### # The following code/paths will have to be adapted for your own system. # Data provided for the stc_annular_modes POD of MDTF was originally # derived from ERA5 reanalysis zonal mean geopotential heights diff --git a/diagnostics/stc_annular_modes/settings.jsonc b/diagnostics/stc_annular_modes/settings.jsonc index c08974ac9..50f12df36 100644 --- a/diagnostics/stc_annular_modes/settings.jsonc +++ b/diagnostics/stc_annular_modes/settings.jsonc @@ -7,7 +7,7 @@ "settings" : { "driver" : "stc_annular_modes.py", "long_name" : "Annular Mode Coupling", - "realm" : "atmos", + "convention" : "cmip", "description" : "Assess the representation of zonal mean annular mode coupling between the troposphere and stratosphere", "pod_env_vars" : { // The first and last years of the obs data to use. @@ -42,7 +42,11 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -54,6 +58,7 @@ "varlist": { "zg": { "standard_name" : "geopotential_height", + "realm": "atmos", "units" : "m", "frequency": "day", "dimensions": ["time", "lev", "lat"] // note the lack of "lon" -- this should be zonal mean data! diff --git a/diagnostics/stc_annular_modes/stc_annular_modes.html b/diagnostics/stc_annular_modes/stc_annular_modes.html index d14028ad2..38bdcfc6e 100644 --- a/diagnostics/stc_annular_modes/stc_annular_modes.html +++ b/diagnostics/stc_annular_modes/stc_annular_modes.html @@ -31,7 +31,7 @@

{{CASENAME}}

-Annular Mode Interannual Variability
+< color=navy>Annular Mode Interannual Variability

These plots show the interannual standard deviation of the annular mode indices as a function of day of year.
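A rough sketch of the plotted quantity, assuming a daily annular mode index held in an xarray DataArray (the POD's own calculation may differ in detail):

import xarray as xr

def interannual_std_by_doy(am_index: xr.DataArray) -> xr.DataArray:
    # For each day of year, take the standard deviation of the index
    # value across all years in the record.
    return am_index.groupby("time.dayofyear").std("time")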

@@ -74,7 +74,7 @@

{{CASENAME}}

-Annular Mode Predictability
+< color=navy>Annular Mode Predictability

These plots show the predictability of the annular mode at a tropospheric pressure level (by default 850 hPa, but this value is configurable in the POD settings.jsonc file). They depict the fraction of the variance of the 10-40 days ahead mean annular mode index for @@ -95,4 +95,4 @@

{{CASENAME}}

SH Model Composites SH Reanalysis Composites
\ No newline at end of file + diff --git a/diagnostics/stc_annular_modes/stc_annular_modes.py b/diagnostics/stc_annular_modes/stc_annular_modes.py index 8beefc280..f08cc0061 100644 --- a/diagnostics/stc_annular_modes/stc_annular_modes.py +++ b/diagnostics/stc_annular_modes/stc_annular_modes.py @@ -80,7 +80,6 @@ import os import traceback - import xarray as xr import matplotlib as mpl @@ -129,7 +128,7 @@ def make_tseries(am, which, data_name, out_dir, first, last): # Iterate over each hemisphere hemis = {"S": -1, "N": 1} - ### BEGIN MODEL DIAGNOSTIC CODEBLOCK ### + # BEGIN MODEL DIAGNOSTIC CODEBLOCK ### for hemi, hn in hemis.items(): # E-FOLDING TIMESCALES print(f"*** Computing the {which} {hemi}AM e-folding timescales") @@ -183,9 +182,9 @@ def make_tseries(am, which, data_name, out_dir, first, last): # Parse MDTF-set environment variables print("*** Parse MDTF-set environment variables ...") CASENAME = os.environ["CASENAME"] -FIRSTYR = int(os.environ["FIRSTYR"]) -LASTYR = int(os.environ["LASTYR"]) -WK_DIR = os.environ["WK_DIR"] +FIRSTYR = int(os.environ["startdate"]) +LASTYR = int(os.environ["enddate"]) +WK_DIR = os.environ["WORK_DIR"] OBS_DATA = os.environ["OBS_DATA"] # Input and output files/directories diff --git a/diagnostics/stc_annular_modes/stc_annular_modes_plot.py b/diagnostics/stc_annular_modes/stc_annular_modes_plot.py index 9d3af2730..0387aa700 100644 --- a/diagnostics/stc_annular_modes/stc_annular_modes_plot.py +++ b/diagnostics/stc_annular_modes/stc_annular_modes_plot.py @@ -56,18 +56,18 @@ def _doy_fig_params(diag): """ - if (diag == 'eftscale'): + if diag == 'eftscale': clevs = [5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 24, 28, 32, 36, 40, 48, 56, 64, 72, 80, 88] clines = clevs+[96, 104, 112, 120, 130, 140, 150] cbar_label = 'e-folding timescale [days]' csfmt = '%d' - elif (diag == 'interannstdv'): + elif diag == 'interannstdv': clevs = np.linspace(0, 2, 21) clines = list(clevs)+[2.2, 2.4, 2.6, 2.8, 3.0, 3.5, 4.0] cbar_label = 'Interannual Std. Deviation' csfmt = '%0.1f' - elif (diag == 'predictability'): + elif diag == 'predictability': clevs = np.linspace(0, 0.4, 21) clines = list(clevs)+[0.42, 0.44, 0.46, 0.48, 0.5, 0.55, 0.6, 0.65, 0.70] @@ -87,7 +87,7 @@ def _doy_fig_params(diag): # make sure user inputs a valid diagnostic diag_options = ['eftscale', 'interannstdv', 'predictability'] - if (diag not in diag_options): + if diag not in diag_options: msg = f'diag must be one of {diag_options}' raise ValueError(msg) @@ -98,12 +98,12 @@ def _doy_fig_params(diag): # centered in the middle of the plot, but the position of Jan 1 and other # months varies based on the underlying calendar of the data max_doy = int(dat.dayofyear.max()) - if (max_doy == 365): + if max_doy == 365: # July 1 falls on DOY 182 roll_to = -181 xticks = np.array([1, 32, 63, 93, 124, 154, 185, 213, 244, 274, 305, 335, 365]) - elif (max_doy == 360): + elif max_doy == 360: # July 1 falls on DOY 181 roll_to = -180 xticks = np.array([1, 31, 61, 91, 121, 151, 181, diff --git a/diagnostics/stc_eddy_heat_fluxes/make_era5-rean_pod-data.py b/diagnostics/stc_eddy_heat_fluxes/make_era5-rean_pod-data.py index e7ce442a8..aaf949080 100644 --- a/diagnostics/stc_eddy_heat_fluxes/make_era5-rean_pod-data.py +++ b/diagnostics/stc_eddy_heat_fluxes/make_era5-rean_pod-data.py @@ -22,7 +22,7 @@ import numpy as np import xarray as xr -### BEGIN: READ INPUT FIELDS ### +# BEGIN: READ INPUT FIELDS ### # The following code/paths will have to be adapted for your own system. 
# # On my system, the monthly-mean variables are contained in individual @@ -42,7 +42,7 @@ # should each contain all available months of meridional wind, # air temperature and geopotential height, respectively. They can be # lazily loaded with xarray (e.g., after using open_mfdataset) -### END: READ INPUT FIELDS ### +# END: READ INPUT FIELDS ### ehf = [] @@ -65,7 +65,7 @@ zg = hgt_ds.hgt.sel(time=time).load() # Compute zonal mean temperatures - ta_zm_50_tmp = t50.mean('lon') + ta_zm_50_tmp = t50.mean('lon') ta_zm_100_tmp = t100.mean('lon') # Compute zonal mean eddy heat flux diff --git a/diagnostics/stc_eddy_heat_fluxes/settings.jsonc b/diagnostics/stc_eddy_heat_fluxes/settings.jsonc index c83feec2d..f91d9054f 100644 --- a/diagnostics/stc_eddy_heat_fluxes/settings.jsonc +++ b/diagnostics/stc_eddy_heat_fluxes/settings.jsonc @@ -9,6 +9,7 @@ "driver" : "stc_eddy_heat_fluxes.py", "long_name" : "Upward Coupling of Vertically Propagating Planetary Waves", "realm" : "atmos", + "convention" : "cmip", "description" : "Assess the influence of wave driving on the polar stratosphere", "pod_env_vars" : { // Lower latitude limit for heat flux lat band avgs (defaults to 45) @@ -28,8 +29,16 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -41,6 +50,7 @@ "varlist": { "v100": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "frequency": "mon", "dimensions": ["time", "lat", "lon"], @@ -49,6 +59,7 @@ }, "t100": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "frequency": "mon", "dimensions": ["time", "lat", "lon"], @@ -57,6 +68,7 @@ }, "t50": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "frequency": "mon", "dimensions": ["time", "lat", "lon"], @@ -65,6 +77,7 @@ }, "va": { "standard_name" : "northward_wind", + "realm": "atmos", "units" : "m s-1", "frequency": "mon", "dimensions": ["time", "lev", "lat", "lon"], @@ -72,6 +85,7 @@ }, "ta": { "standard_name" : "air_temperature", + "realm": "atmos", "units" : "K", "frequency": "mon", "dimensions": ["time", "lev", "lat", "lon"], @@ -79,6 +93,7 @@ }, "zg": { "standard_name" : "geopotential_height", + "realm": "atmos", "units" : "m", "frequency": "mon", "dimensions": ["time", "lev", "lat", "lon"] diff --git a/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.html b/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.html index 2d995afc5..c7ca5f3cc 100644 --- a/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.html +++ b/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.html @@ -29,7 +29,7 @@

Influence of vertically propagating waves on the extratropical stratosphere

{{CASENAME}}

-Eddy Heat Flux vs Polar Cap Temperatures
+< color=navy>Eddy Heat Flux vs Polar Cap Temperatures
Model @@ -49,7 +49,7 @@

{{CASENAME}}

-Eddy Heat Flux vs Polar Cap Height Lag Correlations
+< color=navy>Eddy Heat Flux vs Polar Cap Height Lag Correlations
Model diff --git a/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.py b/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.py index 8747e5ac8..bc9e3c568 100644 --- a/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.py +++ b/diagnostics/stc_eddy_heat_fluxes/stc_eddy_heat_fluxes.py @@ -414,9 +414,9 @@ def _align_month_corrs(ehf, zpc, month, hemi): # Parse MDTF-set environment variables print('*** Parse MDTF-set environment variables ...') CASENAME = os.environ['CASENAME'] -FIRSTYR = os.environ['FIRSTYR'] -LASTYR = os.environ['LASTYR'] -WK_DIR = os.environ['WK_DIR'] +FIRSTYR = os.environ['startdate'] +LASTYR = os.environ['enddate'] +WK_DIR = os.environ['WORK_DIR'] OBS_DATA = os.environ['OBS_DATA'] vfi = os.environ['V100_FILE'] @@ -427,22 +427,22 @@ def _align_month_corrs(ehf, zpc, month, hemi): # Parse POD-specific environment variables print('*** Parse POD-specific environment variables ...') -EHF_LO_LAT = int(os.environ['HEAT_FLUX_LO_LAT']) -EHF_HI_LAT = int(os.environ['HEAT_FLUX_HI_LAT']) +EHF_LO_LAT = int(os.environ['HEAT_FLUX_LO_LAT']) +EHF_HI_LAT = int(os.environ['HEAT_FLUX_HI_LAT']) PCAP_LO_LAT = int(os.environ['PCAP_LO_LAT']) # Do error-checking on these environment variables. Rather than trying to # correct the values, we throw errors so that users can adjust their config # files in the appropriate manner, and obtain expected results. -if (EHF_LO_LAT >= EHF_HI_LAT): +if EHF_LO_LAT >= EHF_HI_LAT: msg = 'EHF_LO_LAT must be less than EHF_HI_LAT, and both must be >= 30' raise ValueError(msg) -if (EHF_LO_LAT < 30): +if EHF_LO_LAT < 30: msg = 'EHF_LO_LAT must be >= 30' raise ValueError(msg) -if (PCAP_LO_LAT < 30): +if PCAP_LO_LAT < 30: msg = 'PCAP_LO_LAT must be >= 30' raise ValueError(msg) @@ -458,9 +458,10 @@ def _align_month_corrs(ehf, zpc, month, hemi): print(' t50') t50 = xr.open_dataset(t50fi)['t50'] t_lev_fis = True -except: - print('Unable to read individal prs level ta files; querying 4D fields') +except Exception as exc: + print('Unable to read individual prs level ta files; querying 4D fields') print(' ta') + print(exc) ta = xr.open_dataset(tfi)['ta'] t50 = ta.sel(lev=50) t100 = ta.sel(lev=100) @@ -486,16 +487,17 @@ def _align_month_corrs(ehf, zpc, month, hemi): print('*** Computing polar cap averages of 50 hPa temperature') ta_pcap = {} -ta_zm_50 = t50.mean('lon') +ta_zm_50 = t50.mean('lon') ta_pcap['NH'] = lat_avg(ta_zm_50, PCAP_LO_LAT, 90) ta_pcap['SH'] = lat_avg(ta_zm_50, -90, -PCAP_LO_LAT) # At this point, no longer need the raw data + v100 = v100.close() -zg = zg.close() -if (t_lev_fis is True): +zg = zg.close() +if t_lev_fis: t100 = t100.close() - t50 = t50.close() + t50 = t50.close() else: ta = ta.close() @@ -504,7 +506,7 @@ def _align_month_corrs(ehf, zpc, month, hemi): for hemi in ['NH','SH']: print(f'*** Plotting {hemi} EHF vs polar cap T scatter plot') scatter_plot = f'{plot_dir}/{CASENAME}_{hemi}_EHF-Tpcap_Scatter.eps' - fig,ax = plot_ehf_tcap_corr(ehf_band[hemi], ta_pcap[hemi], hemi) + fig, ax = plot_ehf_tcap_corr(ehf_band[hemi], ta_pcap[hemi], hemi) ax.set_title(f'{CASENAME}\n{hemi}, {FIRSTYR}-{LASTYR}', fontsize=20) fig.savefig(scatter_plot) @@ -521,12 +523,12 @@ def _align_month_corrs(ehf, zpc, month, hemi): outfile = data_dir+f'/{CASENAME}_eddy-heat-flux_diagnostics.nc' # Prepare the output variables and their metadata -zg_pcap = xr.concat([zg_pcap['SH'], zg_pcap['NH']], dim='hemi') +zg_pcap = xr.concat([zg_pcap['SH'], zg_pcap['NH']], dim='hemi') zg_pcap.name = 'zg_pcap' zg_pcap.attrs['units'] = 'm' zg_pcap.attrs['long_name'] = f'{PCAP_LO_LAT}-90 
polar cap geopotential height' -ta_pcap = xr.concat([ta_pcap['SH'], ta_pcap['NH']], dim='hemi') +ta_pcap = xr.concat([ta_pcap['SH'], ta_pcap['NH']], dim='hemi') ta_pcap.name = 'ta_pcap_50' ta_pcap.attrs['units'] = 'K' ta_pcap.attrs['long_name'] = f'50 hPa {PCAP_LO_LAT}-90 polar cap temperature' @@ -580,19 +582,20 @@ def _align_month_corrs(ehf, zpc, month, hemi): for hemi in ['NH','SH']: print(f'*** Plotting {hemi} EHF vs polar cap T scatter plot from obs') scatter_plot = f'{plot_dir}/obs_{hemi}_EHF-Tpcap_Scatter.eps' - fig,ax = plot_ehf_tcap_corr(ehf_band[hemi], ta_pcap[hemi], hemi) + fig, ax = plot_ehf_tcap_corr(ehf_band[hemi], ta_pcap[hemi], hemi) ax.set_title(f'{rean}\n{hemi}, {obs_firstyr}-{obs_lastyr}', fontsize=20) fig.savefig(scatter_plot) print(f'*** Plotting {hemi} EHF vs polar cap Z lag correlations from obs') levcorr_plot = f'{plot_dir}/obs_{hemi}_EHF-Zpcap_LagCorr.eps' - fig,ax = plot_ehf_zcap_lags(ehf_band[hemi], zg_pcap[hemi], hemi) + fig, ax = plot_ehf_zcap_lags(ehf_band[hemi], zg_pcap[hemi], hemi) plt.suptitle(f'{rean}, {hemi}, {obs_firstyr}-{obs_lastyr}', fontsize=20) fig.savefig(levcorr_plot) -except: +except Exception as exc: print('*** Unable to create plots from the observational data: ') print(traceback.format_exc()) + print(exc) print('\n=====================================') print('END stc_eddy_heat_fluxes.py ') diff --git a/diagnostics/stc_ozone/settings.jsonc b/diagnostics/stc_ozone/settings.jsonc index 7a8760b83..17f583296 100644 --- a/diagnostics/stc_ozone/settings.jsonc +++ b/diagnostics/stc_ozone/settings.jsonc @@ -8,7 +8,7 @@ "settings" : { "driver" : "stc_ozone.py", "long_name" : "Stratospheric Ozone and Circulation", - "realm" : "atmos", + "convention" : "cmip", "description" : "Assess the relationships between spring stratospheric ozone and circulation", "pod_env_vars" : { // Lower latitude limit for zonal wind lat band avgs (defaults to 50) @@ -28,8 +28,16 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "Pa", @@ -41,6 +49,7 @@ "varlist": { "ua": { "standard_name" : "eastward_wind", + "realm": "atmos", "units" : "m s-1", "frequency": "mon", "dimensions": ["time", "lev", "lat", "lon"], @@ -48,6 +57,7 @@ }, "ta": { "standard_name" : "air_temperature", + "realm": "atmos", "units" : "K", "frequency": "mon", "dimensions": ["time", "lev", "lat", "lon"], @@ -55,6 +65,7 @@ }, "o3": { "standard_name" : "mole_fraction_of_ozone_in_air", + "realm": "atmos", "use_exact_name": true, "units" : "mol mol-1", "frequency": "mon", diff --git a/diagnostics/stc_ozone/stc_ozone.html b/diagnostics/stc_ozone/stc_ozone.html index 9f58a0cd4..4b98707d4 100644 --- a/diagnostics/stc_ozone/stc_ozone.html +++ b/diagnostics/stc_ozone/stc_ozone.html @@ -31,7 +31,7 @@

Relationship of stratospheric ozone with stratospheric and tropospheric circulation

{{CASENAME}}

-Early spring polar cap ozone vs late spring zonal winds
+< color=navy>Early spring polar cap ozone vs late spring zonal winds
Model @@ -51,7 +51,7 @@

{{CASENAME}}

-Early spring polar cap ozone vs final stratospheric warming Day of Year (DOY)
+< color=navy>Early spring polar cap ozone vs final stratospheric warming Day of Year (DOY)
Model @@ -71,7 +71,7 @@

{{CASENAME}}

-Spring polar cap ozone lag correlated to zonal winds
+< color=navy>Spring polar cap ozone lag correlated to zonal winds
-
Model @@ -91,7 +91,7 @@

{{CASENAME}}

-Trends during ozone depletion and recovery eras
+< color=navy>Trends during ozone depletion and recovery eras
Model diff --git a/diagnostics/stc_ozone/stc_ozone.py b/diagnostics/stc_ozone/stc_ozone.py index 597e3e406..f54bb793c 100644 --- a/diagnostics/stc_ozone/stc_ozone.py +++ b/diagnostics/stc_ozone/stc_ozone.py @@ -169,9 +169,9 @@ def plot_o3_ustrat_corr(uzm_bnd, o3_pcap, hemi): fig, ax = plt.subplots() - if (hemi == 'NH'): + if hemi == 'NH': # Need FMA polar cap ozone at 50 hPa - xlab_str = f"50 hPa FMA O3"+\ + xlab_str = f"50 hPa FMA O3" + \ f"({PCAP_LO_LAT}-90N), [ppmv]" o3_seas = o3_pcap.sel(lev=50).resample(time='QS-FEB').mean('time') o3_seas = o3_seas.where(o3_seas.time.dt.month == 2, drop=True) @@ -181,7 +181,7 @@ def plot_o3_ustrat_corr(uzm_bnd, o3_pcap, hemi): uzm_seas = uzm_bnd.sel(lev=50).resample(time='QS-MAR').mean('time') uzm_seas = uzm_seas.where(uzm_seas.time.dt.month == 3, drop=True) - elif (hemi == 'SH'): + elif hemi == 'SH': # Need SON polar cap ozone at 50 hPa xlab_str = f"50 hPa SON O3 "+\ f"({PCAP_LO_LAT}-90S), [ppmv]" @@ -229,17 +229,18 @@ def plot_o3_ustrat_corr(uzm_bnd, o3_pcap, hemi): r = np.corrcoef(o3_seas.isel(time=ixs).values, uzm_seas.isel(time=ixs).values)[0,1] corr_bs.append(r) - bs_lo,bs_hi = np.percentile(corr_bs, [2.5, 97.5]) + bs_lo, bs_hi = np.percentile(corr_bs, [2.5, 97.5]) # display the correlation and 95% bootstrap CI plt.text(0.45,0.88, f'r={corr:.3f} ({bs_lo:.3f}, {bs_hi:.3f})', transform=ax.transAxes, fontsize=16, color='red', fontweight='semibold') - fig.subplots_adjust(left=0.1,right=0.98) - fig.set_size_inches(6.5,6.5) + fig.subplots_adjust(left=0.1, right=0.98) + fig.set_size_inches(6.5, 6.5) + + return fig, ax - return (fig,ax) def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): r""" Create a scatterplot showing the relationship between 50 mb @@ -280,9 +281,9 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): """ fig, ax = plt.subplots() - if (hemi == 'NH'): + if hemi == 'NH': # Need FMA polar cap ozone at 50 hPa - xlab_str = f"50 hPa FMA O3"+\ + xlab_str = f"50 hPa FMA O3" + \ f"({PCAP_LO_LAT}-90N), [ppmv]" o3_seas = o3_pcap.sel(lev=50).resample(time='QS-FEB').mean('time') o3_seas = o3_seas.where(o3_seas.time.dt.month == 2, drop=True) @@ -297,17 +298,17 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): if n == 'NaN': aDate = float("NaN") else: - aDate = datetime.date.fromisoformat(n).timetuple().tm_yday + aDate = datetime.date.fromisoformat(n).timetuple().tm_yday doyn.append(aDate) - doy=np.array(doyn,dtype='float') + doy = np.array(doyn, dtype='float') with open(filepath, 'w') as file_handler: for item in fsw: file_handler.write(f"{item}\n") - elif (hemi == 'SH'): + elif hemi == 'SH': # Need SON polar cap ozone at 50 hPa - xlab_str = f"50 hPa SON O3"+\ + xlab_str = f"50 hPa SON O3" + \ f"({PCAP_LO_LAT}-90S), [ppmv]" o3_seas = o3_pcap.sel(lev=50).resample(time='QS-SEP').mean('time') o3_seas = o3_seas.where(o3_seas.time.dt.month == 9, drop=True) @@ -324,7 +325,7 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): else: aDate = datetime.date.fromisoformat(n).timetuple().tm_yday doyn.append(aDate) - doy=np.array(doyn,dtype='float') + doy = np.array(doyn, dtype='float') with open(filepath, 'w') as file_handler: for item in fsw: @@ -336,12 +337,12 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): # Determine plot axes from the data xlims = (np.round(o3_seas.min())-1, np.round(o3_seas.max())+1) - if (hemi == 'NH'): + if hemi == 'NH': ylims = (np.nanmin(doy)-3, np.nanmax(doy).max()+3) - elif (hemi == 'SH'): - for i,n in enumerate(doy): + elif hemi == 'SH': + for i, n in enumerate(doy): if n < 180: - doy[i] = doy[i]+365 
#note, this doesn't account for leap-years + doy[i] = doy[i] + 365 # note, this doesn't account for leap-years ylims = (np.round(np.nanmin(doy)-3), np.round(np.nanmax(doy).max()+3)) # Set plot limits, add labels, and make axis square @@ -350,40 +351,40 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): plt.xlabel(xlab_str, fontsize=18) plt.ylabel(ylab_str, fontsize=18) ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box') - if (hemi == 'SH'): - y_pos = np.arange(np.round(ylims[0]),np.round(ylims[1])+10,10) + if hemi == 'SH': + y_pos = np.arange(np.round(ylims[0]), np.round(ylims[1])+10, 10) ax.set_yticks(y_pos) - y_pos2 = np.where(y_pos <= 365,y_pos,y_pos-365) + y_pos2 = np.where(y_pos <= 365, y_pos, y_pos-365) ax.set_yticklabels(y_pos2) # Plot ranges of +/- 1 std and mean - plt.vlines(o3_seas.mean(),ylims[0],ylims[1],color='gainsboro',linewidth=0.66) - plt.hlines(np.nanmean(doy),xlims[0],xlims[1],color='gainsboro',linewidth=0.66) - plt.axvspan(o3_seas.mean()-o3_seas.std(), o3_seas.mean()+o3_seas.std(),color='whitesmoke') - plt.axhspan(np.nanmean(doy)-np.nanstd(doy), np.nanmean(doy)+np.nanstd(doy),color='whitesmoke') - ax.scatter(o3_seas.values, doy, c='dimgrey', s=16,zorder=100) + plt.vlines(o3_seas.mean(), ylims[0], ylims[1], color='gainsboro', linewidth=0.66) + plt.hlines(np.nanmean(doy), xlims[0], xlims[1], color='gainsboro', linewidth=0.66) + plt.axvspan(o3_seas.mean()-o3_seas.std(), o3_seas.mean()+o3_seas.std(), color='whitesmoke') + plt.axhspan(np.nanmean(doy)-np.nanstd(doy), np.nanmean(doy) + np.nanstd(doy), color='whitesmoke') + ax.scatter(o3_seas.values, doy, c='dimgrey', s=16, zorder=100) # Get the correlation and do bootstrapping to determine its 95% CI a=ma.masked_invalid(o3_seas.values) b=ma.masked_invalid(doy) msk = (~a.mask & ~b.mask) - corr = ma.corrcoef(a[msk],b[msk])[0,1] + corr = ma.corrcoef(a[msk], b[msk])[0, 1] # Get the best-fit line and plot it - m,yo,r,p,std_err = linregress(a[msk],b[msk]) - x = np.linspace(xlims[0],xlims[1]) - plt.plot(x,m*x+yo, color='black', linestyle='--', linewidth=0.66) + m, yo, r, p, std_err = linregress(a[msk], b[msk]) + x = np.linspace(xlims[0], xlims[1]) + plt.plot(x, m*x+yo, color='black', linestyle='--', linewidth=0.66) nbs = 1000 corr_bs = [] for n in range(nbs): ixs = np.random.choice(o3_seas.size, size=o3_seas.size) - a=ma.masked_invalid(o3_seas.isel(time=ixs).values) - b=ma.masked_invalid(doy[ixs]) + a = ma.masked_invalid(o3_seas.isel(time=ixs).values) + b = ma.masked_invalid(doy[ixs]) msk = (~a.mask & ~b.mask) - r = ma.corrcoef(a[msk],b[msk])[0,1] + r = ma.corrcoef(a[msk], b[msk])[0, 1] corr_bs.append(r) - bs_lo,bs_hi = np.nanpercentile(corr_bs, [2.5, 97.5]) + bs_lo, bs_hi = np.nanpercentile(corr_bs, [2.5, 97.5]) # display the correlation and 95% bootstrap CI plt.text(0.05,0.08, f'r={corr:.3f} ({bs_lo:.3f}, {bs_hi:.3f})', @@ -393,7 +394,7 @@ def plot_o3_fsw_corr(uzm_50, o3_pcap, hemi, filepath): fig.subplots_adjust(left=0.1,right=0.98) fig.set_size_inches(6.5,6.5) - return (fig,ax) + return fig, ax def plot_o3_uwnd_lev_lags(uzm_bnd, o3_pcap, hemi): @@ -433,12 +434,12 @@ def plot_o3_uwnd_lev_lags(uzm_bnd, o3_pcap, hemi): """ - if (hemi == 'NH'): + if hemi == 'NH': # Need April 50 mb polar cap ozone mon_origin = 'Apr' o3_early = o3_pcap.sel(lev=50).where(o3_pcap.time.dt.month == 4, drop=True) months = [2, 3, 4, 5, 6] - elif (hemi == 'SH'): + elif hemi == 'SH': # Need October 50 mb polar cap ozone mon_origin = 'Oct' o3_early = o3_pcap.sel(lev=50).where(o3_pcap.time.dt.month == 10, drop=True) @@ -452,10 +453,10 @@ def 
plot_o3_uwnd_lev_lags(uzm_bnd, o3_pcap, hemi): lag_corrs = [] for mon in months: uzm_mon = uzm_bnd.where(uzm_bnd.time.dt.month == mon, drop=True) - data_mat = np.concatenate([o3_early.values[:,np.newaxis], + data_mat = np.concatenate([o3_early.values[:, np.newaxis], uzm_mon.values], axis=1) - corrs = np.corrcoef(data_mat.T)[0,1:] - lag_corrs.append(corrs[np.newaxis,...]) + corrs = np.corrcoef(data_mat.T)[0, 1:] + lag_corrs.append(corrs[np.newaxis, ...]) lag_corrs = np.concatenate(lag_corrs, axis=0) # Evaluate significance using 2-tailed t-test @@ -470,8 +471,8 @@ def plot_o3_uwnd_lev_lags(uzm_bnd, o3_pcap, hemi): xlab_str = "Month" ylab_str = "Pressure [hPa]" cbp = ax.contourf(np.arange(5), uzm_bnd.lev.values, lag_corrs.T, - levels=np.linspace(-1,1,21),cmap='RdBu_r',extend='both') - ax.contourf(np.arange(5), uzm_bnd.lev.values, ttests.T, levels=[-1,0,1], hatches=[None,'..'], colors='none') + levels=np.linspace(-1, 1, 21), cmap='RdBu_r', extend='both') + ax.contourf(np.arange(5), uzm_bnd.lev.values, ttests.T, levels=[-1, 0, 1], hatches=[None, '..'], colors='none') ax.set_yscale('log') ax.invert_yaxis() plt.xticks(np.arange(5), months) @@ -480,9 +481,9 @@ def plot_o3_uwnd_lev_lags(uzm_bnd, o3_pcap, hemi): plt.title(f'Lag correlation of Zonal-mean Zonal Wind with {mon_origin} 50 hPa polar cap ozone') plt.colorbar(cbp, format='%.1f', label='Correlation', ax=[ax], location='bottom') - fig.set_size_inches(10,6) + fig.set_size_inches(10, 6) - return (fig,ax) + return fig, ax def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', @@ -545,20 +546,20 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', t_tr_late, t_p_late = l_trend(t_pcap,start_year2, end_year2) u_tr_late, u_p_late = l_trend(uzm_bnd,start_year2, end_year2) - #Shift around so that it goes JASONDJFMAMJ instead - t_tr_shift_early = xr.concat([t_tr_early[6:,:],t_tr_early[0:6,:]],dim="month") - u_tr_shift_early = xr.concat([u_tr_early[6:,:],u_tr_early[0:6,:]],dim="month") - o3_tr_shift_early = xr.concat([o3_tr_early[6:,:],o3_tr_early[0:6,:]],dim="month") - o3_p_shift_early = xr.concat([o3_p_early[6:,:],o3_p_early[0:6,:]],dim="month") - t_p_shift_early = xr.concat([t_p_early[6:,:],t_p_early[0:6,:]],dim="month") - u_p_shift_early = xr.concat([u_p_early[6:,:],u_p_early[0:6,:]],dim="month") + # Shift around so that it goes JASONDJFMAMJ instead + t_tr_shift_early = xr.concat([t_tr_early[6:, :], t_tr_early[0:6, :]], dim="month") + u_tr_shift_early = xr.concat([u_tr_early[6:, :], u_tr_early[0:6, :]], dim="month") + o3_tr_shift_early = xr.concat([o3_tr_early[6:, :], o3_tr_early[0:6, :]], dim="month") + o3_p_shift_early = xr.concat([o3_p_early[6:, :], o3_p_early[0:6, :]], dim="month") + t_p_shift_early = xr.concat([t_p_early[6:, :], t_p_early[0:6, :]], dim="month") + u_p_shift_early = xr.concat([u_p_early[6:, :], u_p_early[0:6, :]], dim="month") - t_tr_shift_late = xr.concat([t_tr_late[6:,:],t_tr_late[0:6,:]],dim="month") - u_tr_shift_late = xr.concat([u_tr_late[6:,:],u_tr_late[0:6,:]],dim="month") - o3_tr_shift_late = xr.concat([o3_tr_late[6:,:],o3_tr_late[0:6,:]],dim="month") - o3_p_shift_late = xr.concat([o3_p_late[6:,:],o3_p_late[0:6,:]],dim="month") - t_p_shift_late = xr.concat([t_p_late[6:,:],t_p_late[0:6,:]],dim="month") - u_p_shift_late = xr.concat([u_p_late[6:,:],u_p_late[0:6,:]],dim="month") + t_tr_shift_late = xr.concat([t_tr_late[6:, :], t_tr_late[0:6, :]], dim="month") + u_tr_shift_late = xr.concat([u_tr_late[6:, :], u_tr_late[0:6, :]], dim="month") + o3_tr_shift_late = xr.concat([o3_tr_late[6:, :], 
o3_tr_late[0:6, :]], dim="month") + o3_p_shift_late = xr.concat([o3_p_late[6:, :], o3_p_late[0:6, :]], dim="month") + t_p_shift_late = xr.concat([t_p_late[6:, :], t_p_late[0:6, :]], dim="month") + u_p_shift_late = xr.concat([u_p_late[6:, :], u_p_late[0:6, :]], dim="month") fig, axs = plt.subplots(2, 3, figsize=(12, 10)) @@ -566,101 +567,102 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', ylab_str = "Pressure (hPa)" # Top row shows trends from period of ozone depletion - cbp = axs[0,0].contourf(np.arange(12),t_tr_shift_early.lev,t_tr_shift_early.transpose(), - levels=np.linspace(-5,5,11),cmap='RdBu_r',extend='both') - axs[0,0].contourf(np.arange(12),t_p_shift_early.lev,t_p_shift_early.transpose(), - levels=[0.05,0.95],hatches=['..'], colors='none') - axs[0,0].set_yscale('log') - axs[0,0].invert_yaxis() - axs[0,0].set_ylim([1000,10]) + cbp = axs[0, 0].contourf(np.arange(12), t_tr_shift_early.lev, t_tr_shift_early.transpose(), + levels=np.linspace(-5, 5, 11), cmap='RdBu_r', extend='both') + axs[0, 0].contourf(np.arange(12), t_p_shift_early.lev, t_p_shift_early.transpose(), + levels=[0.05, 0.95], hatches=['..'], colors='none') + axs[0, 0].set_yscale('log') + axs[0, 0].invert_yaxis() + axs[0, 0].set_ylim([1000, 10]) - axs[0,0].set_xticks(np.arange(12)) - axs[0,0].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J']) - axs[0,0].set(xlabel=xlab_str, ylabel=ylab_str, title=f'Polar cap Temperature Trends \n'+start_year1+'-'+end_year1) - plt.colorbar(cbp, format='%.1f',label='[K/decade]', ax=[axs[0,0]], location='bottom') + axs[0, 0].set_xticks(np.arange(12)) + axs[0, 0].set_xticklabels(['J', 'A', 'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J']) + axs[0, 0].set(xlabel=xlab_str, ylabel=ylab_str, title=f'Polar cap Temperature Trends \n'+start_year1+'-'+end_year1) + plt.colorbar(cbp, format='%.1f', label='[K/decade]', ax=[axs[0, 0]], location='bottom') - cbp2 = axs[0,1].contourf(np.arange(12),u_tr_shift_early.lev,u_tr_shift_early.transpose(), - levels=np.linspace(-5,5,11),cmap='RdBu_r',extend='both') - axs[0,1].contourf(np.arange(12),u_p_shift_early.lev,u_p_shift_early.transpose(), - levels=[0.05,0.95],hatches=['..'], colors='none') - axs[0,1].set_yscale('log') - axs[0,1].invert_yaxis() - axs[0,1].set_ylim([1000,10]) + cbp2 = axs[0, 1].contourf(np.arange(12), u_tr_shift_early.lev, u_tr_shift_early.transpose(), + levels=np.linspace(-5, 5, 11), cmap='RdBu_r', extend='both') + axs[0, 1].contourf(np.arange(12), u_p_shift_early.lev, u_p_shift_early.transpose(), + levels=[0.05, 0.95], hatches=['..'], colors='none') + axs[0, 1].set_yscale('log') + axs[0, 1].invert_yaxis() + axs[0, 1].set_ylim([1000, 10]) - axs[0,1].set_xticks(np.arange(12)) - axs[0,1].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J']) - axs[0,1].set(xlabel=xlab_str, title=f'Zonal-mean Zonal Wind Trends \n'+start_year1+'-'+end_year1) - plt.colorbar(cbp2, format='%.1f',label='[m/s per decade]', ax=[axs[0,1]], location='bottom') + axs[0, 1].set_xticks(np.arange(12)) + axs[0, 1].set_xticklabels(['J', 'A', 'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J']) + axs[0, 1].set(xlabel=xlab_str, title=f'Zonal-mean Zonal Wind Trends \n'+start_year1+'-'+end_year1) + plt.colorbar(cbp2, format='%.1f', label='[m/s per decade]', ax=[axs[0, 1]], location='bottom') - cbp3 = axs[0,2].contourf(np.arange(12),o3_tr_shift_early.lev,o3_tr_shift_early.transpose(), - levels=np.linspace(-1,1,11),cmap='RdBu_r',extend='both') - axs[0,2].contourf(np.arange(12),o3_p_shift_early.lev,o3_p_shift_early.transpose(), - 
levels=[0.05,0.95],hatches=['..'], colors='none')
- axs[0,2].set_yscale('log')
- axs[0,2].invert_yaxis()
- axs[0,2].set_ylim([1000,10])
+ cbp3 = axs[0, 2].contourf(np.arange(12), o3_tr_shift_early.lev, o3_tr_shift_early.transpose(),
+ levels=np.linspace(-1, 1, 11), cmap='RdBu_r', extend='both')
+ axs[0, 2].contourf(np.arange(12), o3_p_shift_early.lev, o3_p_shift_early.transpose(),
+ levels=[0.05, 0.95], hatches=['..'], colors='none')
+ axs[0, 2].set_yscale('log')
+ axs[0, 2].invert_yaxis()
+ axs[0, 2].set_ylim([1000, 10])
- axs[0,2].set_xticks(np.arange(12))
- axs[0,2].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J'])
- axs[0,2].set(xlabel=xlab_str, title=f'Polar Cap Ozone Trends \n'+start_year1+'-'+end_year1)
- plt.colorbar(cbp3, format='%.1f',label='[ppmv/decade]', ax=[axs[0,2]], location='bottom')
+ axs[0, 2].set_xticks(np.arange(12))
+ axs[0, 2].set_xticklabels(['J', 'A', 'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J'])
+ axs[0, 2].set(xlabel=xlab_str, title=f'Polar Cap Ozone Trends \n'+start_year1+'-'+end_year1)
+ plt.colorbar(cbp3, format='%.1f', label='[ppmv/decade]', ax=[axs[0, 2]], location='bottom')
 # Bottom row shows trends from period of ozone recovery
- cbp = axs[1,0].contourf(np.arange(12),t_tr_shift_late.lev,t_tr_shift_late.transpose(),
- levels=np.linspace(-5,5,11),cmap='RdBu_r',extend='both')
- axs[1,0].contourf(np.arange(12),t_p_shift_late.lev,t_p_shift_late.transpose(),
- levels=[0.05,0.95],hatches=['..'], colors='none')
- axs[1,0].set_yscale('log')
- axs[1,0].invert_yaxis()
- axs[1,0].set_ylim([1000,10])
+ cbp = axs[1, 0].contourf(np.arange(12), t_tr_shift_late.lev, t_tr_shift_late.transpose(),
+ levels=np.linspace(-5, 5, 11), cmap='RdBu_r', extend='both')
+ axs[1, 0].contourf(np.arange(12), t_p_shift_late.lev, t_p_shift_late.transpose(),
+ levels=[0.05, 0.95], hatches=['..'], colors='none')
+ axs[1, 0].set_yscale('log')
+ axs[1, 0].invert_yaxis()
+ axs[1, 0].set_ylim([1000, 10])
- axs[1,0].set_xticks(np.arange(12))
- axs[1,0].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J'])
- axs[1,0].set(xlabel=xlab_str, ylabel=ylab_str, title=start_year2+'-'+end_year2)
- plt.colorbar(cbp, format='%.1f',label='[K/decade]', ax=[axs[1,0]], location='bottom')
-
-
- cbp2 = axs[1,1].contourf(np.arange(12),u_tr_shift_late.lev,u_tr_shift_late.transpose(),
- levels=np.linspace(-5,5,11),cmap='RdBu_r',extend='both')
- axs[1,1].contourf(np.arange(12),u_p_shift_late.lev,u_p_shift_late.transpose(),
- levels=[0.05,0.95],hatches=['..'], colors='none')
- axs[1,1].set_yscale('log')
- axs[1,1].invert_yaxis()
- axs[1,1].set_ylim([1000,10])
+ axs[1, 0].set_xticks(np.arange(12))
+ axs[1, 0].set_xticklabels(['J', 'A', 'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J'])
+ axs[1, 0].set(xlabel=xlab_str, ylabel=ylab_str, title=start_year2+'-'+end_year2)
+ plt.colorbar(cbp, format='%.1f', label='[K/decade]', ax=[axs[1, 0]], location='bottom')
+
+ cbp2 = axs[1, 1].contourf(np.arange(12), u_tr_shift_late.lev, u_tr_shift_late.transpose(),
+ levels=np.linspace(-5, 5, 11), cmap='RdBu_r', extend='both')
+ axs[1, 1].contourf(np.arange(12), u_p_shift_late.lev, u_p_shift_late.transpose(),
+ levels=[0.05, 0.95], hatches=['..'], colors='none')
+ axs[1, 1].set_yscale('log')
+ axs[1, 1].invert_yaxis()
+ axs[1, 1].set_ylim([1000, 10])
- axs[1,1].set_xticks(np.arange(12))
- axs[1,1].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J'])
- axs[1,1].set(xlabel=xlab_str, title=start_year2+'-'+end_year2)
+ axs[1, 1].set_xticks(np.arange(12))
+ axs[1, 1].set_xticklabels(['J', 'A',
'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J']) + axs[1, 1].set(xlabel=xlab_str, title=start_year2+'-'+end_year2) plt.colorbar(cbp2, format='%.1f',label='[m/s per decade]', ax=[axs[1,1]], location='bottom') - cbp3 = axs[1,2].contourf(np.arange(12),o3_tr_shift_late.lev,o3_tr_shift_late.transpose(), - levels=np.linspace(-1,1,11),cmap='RdBu_r',extend='both') - axs[1,2].contourf(np.arange(12),o3_p_shift_late.lev,o3_p_shift_late.transpose(), - levels=[0.05,0.95],hatches=['..'], colors='none') - axs[1,2].set_yscale('log') - axs[1,2].invert_yaxis() - axs[1,2].set_ylim([1000,10]) + cbp3 = axs[1, 2].contourf(np.arange(12), o3_tr_shift_late.lev, o3_tr_shift_late.transpose(), + levels=np.linspace(-1, 1, 11), cmap='RdBu_r', extend='both') + axs[1, 2].contourf(np.arange(12), o3_p_shift_late.lev,o3_p_shift_late.transpose(), + levels=[0.05, 0.95], hatches=['..'], colors='none') + axs[1, 2].set_yscale('log') + axs[1, 2].invert_yaxis() + axs[1, 2].set_ylim([1000, 10]) - axs[1,2].set_xticks(np.arange(12)) - axs[1,2].set_xticklabels(['J','A','S','O','N','D','J','F','M','A','M','J']) - axs[1,2].set(xlabel=xlab_str, title=start_year2+'-'+end_year2) - plt.colorbar(cbp3, format='%.1f',label='[ppmv/decade]', ax=[axs[1,2]], location='bottom') + axs[1, 2].set_xticks(np.arange(12)) + axs[1, 2].set_xticklabels(['J', 'A', 'S', 'O', 'N', 'D', 'J', 'F', 'M', 'A', 'M', 'J']) + axs[1, 2].set(xlabel=xlab_str, title=start_year2 + '-' + end_year2) + plt.colorbar(cbp3, format='%.1f', label='[ppmv/decade]', ax=[axs[1, 2]], location='bottom') - return (fig,axs) + return fig, axs -########################################################################## +# ######################################################################### # --- BEGIN SCRIPT --- # -########################################################################## +# ######################################################################### + + print('\n=======================================') print('BEGIN stc_ozone.py ') print('=======================================\n') -##### Parse MDTF-set environment variables +# Parse MDTF-set environment variables print('*** Parse MDTF-set environment variables ...') CASENAME = os.environ['CASENAME'] -FIRSTYR = int(os.environ['FIRSTYR']) -LASTYR = int(os.environ['LASTYR']) -WK_DIR = os.environ['WK_DIR'] +FIRSTYR = int(os.environ['startdate']) +LASTYR = int(os.environ['enddate']) +WK_DIR = os.environ['WORK_DIR'] OBS_DIR = os.environ['OBS_DATA'] o3fi = os.environ['O3_FILE'] @@ -669,22 +671,22 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', # Parse POD-specific environment variables print('*** Parse POD-specific environment variables ...') -UZM_LO_LAT = int(os.environ['UZM_LO_LAT']) -UZM_HI_LAT = int(os.environ['UZM_HI_LAT']) +UZM_LO_LAT = int(os.environ['UZM_LO_LAT']) +UZM_HI_LAT = int(os.environ['UZM_HI_LAT']) PCAP_LO_LAT = int(os.environ['PCAP_LO_LAT']) # Do error-checking on these environment variables. Rather than trying to # correct the values, we throw errors so that users can adjust their config # files in the appropriate manner, and obtain expected results. 
-if (UZM_LO_LAT >= UZM_HI_LAT): +if UZM_LO_LAT >= UZM_HI_LAT: msg = 'UZM_LO_LAT must be less than UZM_HI_LAT, and both must be >= 30' raise ValueError(msg) -if (UZM_LO_LAT < 30): +if UZM_LO_LAT < 30: msg = 'UZM_LO_LAT must be >= 30' raise ValueError(msg) -if (PCAP_LO_LAT < 30): +if PCAP_LO_LAT < 30: msg = 'PCAP_LO_LAT must be >= 30' raise ValueError(msg) @@ -692,7 +694,7 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', print(f'*** Now starting work on {CASENAME}\n------------------------------') print('*** Reading variables ...') print(' o3') -o3 = xr.open_dataset(o3fi,decode_cf=True)['o3'] +o3 = xr.open_dataset(o3fi, decode_cf=True)['o3'] print(' ta') ta = xr.open_dataset(tfi)['ta'] print(' ua') @@ -701,19 +703,19 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', # Compute the diagnostics (note, here we assume that all model variables are the same length in time) mod_firstyr = o3.time.dt.year.values[0] mod_lastyr = o3.time.dt.year.values[-1] -print(mod_firstyr,mod_lastyr) +print(mod_firstyr, mod_lastyr) print(f'***Limiting model data to {FIRSTYR} to {LASTYR}***') -if (FIRSTYR < mod_firstyr): +if FIRSTYR < mod_firstyr: msg = 'FIRSTYR must be >= model first year' raise ValueError(msg) -if (LASTYR > mod_lastyr): +if LASTYR > mod_lastyr: msg = 'LASTYR must be <= model last year' raise ValueError(msg) -o3s = o3.sel(time=slice(str(FIRSTYR),str(LASTYR))) -tas = ta.sel(time=slice(str(FIRSTYR),str(LASTYR))) -uas = ua.sel(time=slice(str(FIRSTYR),str(LASTYR))) +o3s = o3.sel(time=slice(str(FIRSTYR), str(LASTYR))) +tas = ta.sel(time=slice(str(FIRSTYR), str(LASTYR))) +uas = ua.sel(time=slice(str(FIRSTYR), str(LASTYR))) print(f'*** Computing zonal-means') o3zm = o3s.mean(dim="lon") @@ -730,8 +732,8 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', tzm = tzm.assign_coords({"lev": (tzm.lev/100.)}) tzm.lev.attrs['units'] = 'hPa' -print(f'*** Computing {UZM_LO_LAT}-{UZM_HI_LAT}N and '+\ - f'{UZM_LO_LAT}-{UZM_HI_LAT}S lat averages of zonal-mean zonal winds') +print(f'*** Computing {UZM_LO_LAT}-{UZM_HI_LAT}N and ' + f'{UZM_LO_LAT}-{UZM_HI_LAT}S lat averages of zonal-mean zonal winds') uzm_50 = uzm.sel(lev=50) uzm_band = {} uzm_band['NH'] = lat_avg(uzm, UZM_LO_LAT, UZM_HI_LAT) @@ -767,19 +769,19 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', print(f'*** Plotting {hemi} FSW vs polar cap O3 scatter plot') scatter_FSW = f'{plot_dir}/{CASENAME}_{hemi}_FSW-O3cap_Scatter.eps' filepath = f'{WK_DIR}/model/netCDF/{CASENAME}_{hemi}_fsw.txt' - fig,ax = plot_o3_fsw_corr(uzm_50, o3_pcap[hemi], hemi,filepath) + fig, ax = plot_o3_fsw_corr(uzm_50, o3_pcap[hemi], hemi,filepath) ax.set_title(f'{CASENAME}\n{hemi}, {FIRSTYR}-{LASTYR}', fontsize=20) fig.savefig(scatter_FSW) print(f'*** Plotting {hemi} UZM vs polar cap O3 lag correlations') levcorr_plot = f'{plot_dir}/{CASENAME}_{hemi}_UZM-O3cap_LagCorr_Lev.eps' - fig,ax = plot_o3_uwnd_lev_lags(uzm_band[hemi], o3_pcap[hemi], hemi) + fig, ax = plot_o3_uwnd_lev_lags(uzm_band[hemi], o3_pcap[hemi], hemi) plt.suptitle(f'{CASENAME}, {hemi}, {FIRSTYR}-{LASTYR}', fontsize=20) fig.savefig(levcorr_plot) print(f'*** Plotting {hemi} trends in o3, temp, and UZM') trends_plot = f'{plot_dir}/{CASENAME}_{hemi}_Trends.eps' - fig,axs = plot_o3_seas_trends(uzm_band[hemi], o3_pcap[hemi], t_pcap[hemi]) + fig, axs = plot_o3_seas_trends(uzm_band[hemi], o3_pcap[hemi], t_pcap[hemi]) fig.suptitle(f'{CASENAME}, {hemi}', fontsize=20) fig.savefig(trends_plot) @@ -790,17 +792,17 @@ def plot_o3_seas_trends(uzm_bnd, 
o3_pcap, t_pcap, start_year1='1979', outfile = data_dir+f'/{CASENAME}_ozone-circ_diagnostics.nc' # Prepare the output variables and their metadata -ua_band = xr.concat([uzm_band['SH'], uzm_band['NH']], dim='hemi') +ua_band = xr.concat([uzm_band['SH'], uzm_band['NH']], dim='hemi') ua_band.name = 'ua_band' ua_band.attrs['units'] = 'm s**-1' ua_band.attrs['long_name'] = f'{UZM_LO_LAT}-{UZM_HI_LAT} lat band zonal-mean zonal wind' -ta_pcap = xr.concat([t_pcap['SH'], t_pcap['NH']], dim='hemi') +ta_pcap = xr.concat([t_pcap['SH'], t_pcap['NH']], dim='hemi') ta_pcap.name = 'ta_pcap' ta_pcap.attrs['units'] = 'K' ta_pcap.attrs['long_name'] = f'{PCAP_LO_LAT}-90 polar cap temperature' -oz_pcap = xr.concat([o3_pcap['SH'], o3_pcap['NH']], dim='hemi') +oz_pcap = xr.concat([o3_pcap['SH'], o3_pcap['NH']], dim='hemi') oz_pcap.name = 'oz_pcap' oz_pcap.attrs['units'] = 'ppmv' oz_pcap.attrs['long_name'] = f'{PCAP_LO_LAT}-90 polar cap ozone' @@ -817,7 +819,7 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', print(f'*** Saving ozone-circulation diagnostics to {outfile}') out_ds.to_netcdf(outfile, encoding=encoding) -## Loading obs data files & plotting obs figures: ########################## +# Loading obs data files & plotting obs figures: ########################## print(f'*** Now working on obs data\n------------------------------') obs_file = OBS_DIR + '/stc_ozone_obs-data.nc' @@ -854,31 +856,32 @@ def plot_o3_seas_trends(uzm_bnd, o3_pcap, t_pcap, start_year1='1979', for hemi in ['NH','SH']: print(f'*** Plotting {hemi} UZM vs polar cap O3 scatter plot from rean') scatter_plot = f'{plot_dir}/obs_{hemi}_UZM-O3cap_Scatter.eps' - fig,ax = plot_o3_ustrat_corr(uzm_band[hemi], o3_pcap[hemi], hemi) + fig, ax = plot_o3_ustrat_corr(uzm_band[hemi], o3_pcap[hemi], hemi) ax.set_title(f'{rean}\n{hemi}, {obs_firstyr}-{obs_lastyr}', fontsize=20) fig.savefig(scatter_plot) print(f'*** Plotting {hemi} FSW vs polar cap O3 scatter plot from rean') filepath = f'{WK_DIR}/obs/netCDF/{rean}_{hemi}_fsw.txt' scatter_FSW = f'{plot_dir}/obs_{hemi}_FSW-O3cap_Scatter.eps' - fig,ax = plot_o3_fsw_corr(uzm_50, o3_pcap[hemi], hemi,filepath) + fig, ax = plot_o3_fsw_corr(uzm_50, o3_pcap[hemi], hemi,filepath) ax.set_title(f'{rean}\n{hemi}, {obs_firstyr}-{obs_lastyr}', fontsize=20) fig.savefig(scatter_FSW) print(f'*** Plotting {hemi} UZM vs polar cap O3 lag correlations from rean') levcorr_plot = f'{plot_dir}/obs_{hemi}_UZM-O3cap_LagCorr_Lev.eps' - fig,ax = plot_o3_uwnd_lev_lags(uzm_band[hemi], o3_pcap[hemi], hemi) + fig, ax = plot_o3_uwnd_lev_lags(uzm_band[hemi], o3_pcap[hemi], hemi) plt.suptitle(f'{rean}, {hemi}, {obs_firstyr}-{obs_lastyr}', fontsize=20) fig.savefig(levcorr_plot) print(f'*** Plotting {hemi} trends in o3, temp, and UZM from rean') trends_plot = f'{plot_dir}/obs_{hemi}_Trends.eps' - fig,axs = plot_o3_seas_trends(uzm_band[hemi], o3_pcap[hemi], t_pcap[hemi]) + fig, axs = plot_o3_seas_trends(uzm_band[hemi], o3_pcap[hemi], t_pcap[hemi]) fig.suptitle(f'{rean}, {hemi}', fontsize=20) fig.savefig(trends_plot) -except: +except Exception as exc: print('*** Unable to create plots from the observational data: ') + print(exc) print(traceback.format_exc()) print('\n=====================================') diff --git a/diagnostics/stc_ozone/stc_ozone_defs.py b/diagnostics/stc_ozone/stc_ozone_defs.py index 30a40f708..ec6b8575a 100644 --- a/diagnostics/stc_ozone/stc_ozone_defs.py +++ b/diagnostics/stc_ozone/stc_ozone_defs.py @@ -1,4 +1,4 @@ -''' +""" This module contains functions used in the Stratospheric Ozone and 
Circulation POD. Contains: @@ -8,7 +8,7 @@ l_trend (calculates linear trends as a function of pressure vs month) t_test_corr (t-test for Pearson correlation coefficients) -''' +""" import numpy as np import xarray as xr @@ -16,7 +16,8 @@ from scipy.stats import linregress from cftime import DatetimeNoLeap -#************************************************************************************ +# ************************************************************************************ + def lat_avg(ds, lat_lo, lat_hi): r""" Calculate a meridional average of data. The average is done using @@ -48,7 +49,7 @@ def lat_avg(ds, lat_lo, lat_hi): """ # Limit the latitude range without assuming the ordering of lats - ds_tmp = ds.isel(lat = np.logical_and(ds.lat >= lat_lo, ds.lat <= lat_hi)) + ds_tmp = ds.isel(lat=np.logical_and(ds.lat >= lat_lo, ds.lat <= lat_hi)) # Define the cos(lat) weights wgts = np.cos(np.deg2rad(ds_tmp.lat)) @@ -58,7 +59,8 @@ def lat_avg(ds, lat_lo, lat_hi): ds_wgt_avg = ds_tmp.weighted(wgts).mean('lat') return ds_wgt_avg -#************************************************************************************* +# ************************************************************************************* + def calc_fsw(u_50mb_60deglat, hemi): r""" Calculate the final stratospheric warming date (the date in spring when the @@ -98,37 +100,37 @@ def calc_fsw(u_50mb_60deglat, hemi): tmpu = u_50mb_60deglat - #for data consistency, here force all monthly-means to be centered on the 15th of the month. + # for data consistency, here force all monthly-means to be centered on the 15th of the month. oldyear = tmpu["time.year"] oldmo = tmpu["time.month"] dates = [DatetimeNoLeap(year, month, 15) for year, month in zip(oldyear, oldmo)] tmpu["time"] = dates fsw = [] - smth = xr.DataArray(data=[0],dims=["time"],coords=dict(time=['1900-12-15'])) + smth = xr.DataArray(data=[0], dims=["time"], coords=dict(time=['1900-12-15'])) yrs = tmpu.groupby("time.year").mean().year.values # apply to each year separately for i, y in enumerate(yrs): - if (hemi == 'NH'): + if hemi == 'NH': raw = tmpu.sel(time=tmpu.time.dt.year.isin([y])) - if (hemi == 'SH'): - raw = tmpu.sel(time=tmpu.time.dt.year.isin([y,y+1])) #Need data to go into following year - raw = raw.isel(time=slice(0,14)) # reduce to just the first year + Jan of following year + if hemi == 'SH': + raw = tmpu.sel(time=tmpu.time.dt.year.isin([y, y+1])) # Need data to go into following year + raw = raw.isel(time=slice(0, 14)) # reduce to just the first year + Jan of following year def nharm(x): - - if x.any()==0: - return np.zeros(N) - fft_output = scipy.fft.fft(x) - freq = scipy.fft.fftfreq(len(x)) - filtered_fft_output = np.array([fft_output[i] if round(np.abs(1/f),2) in\ - [round(j,2) for j in [N,N/2,N/3,N/4,N/5]] else 0 for i, f in enumerate(freq)]) - filtered_sig = scipy.fft.ifft(filtered_fft_output) - filtered = filtered_sig.real - - return filtered + if x.any() == 0: + return np.zeros(N) + fft_output = scipy.fft.fft(x) + freq = scipy.fft.fftfreq(len(x)) + filtered_fft_output = np.array([fft_output[i] if round(np.abs(1/f), 2) in + [round(j, 2) for j in [N, N/2, N/3, N/4, N/5]] + else 0 for i, f in enumerate(freq)]) + filtered_sig = scipy.fft.ifft(filtered_fft_output) + filtered = filtered_sig.real + + return filtered N = len(raw) tmpvals = raw.values @@ -136,14 +138,14 @@ def nharm(x): filt = np.apply_along_axis(nharm, 0, tmpvals2) filt = filt.reshape(N) - xfiltered = xr.DataArray(filt, dims=('time'),\ - coords={'time':raw.time}) + xfiltered = xr.DataArray(filt, 
dims=('time'), + coords={'time': raw.time}) smth_new = xfiltered + raw.mean(dim='time') - if (hemi == 'NH'): - smth_all = xr.concat([smth,smth_new],dim='time') - if (hemi == 'SH'): - smth_all = xr.concat([smth,smth_new.isel(time=slice(0,12))],dim='time') + if hemi == 'NH': + smth_all = xr.concat([smth, smth_new], dim='time') + if hemi == 'SH': + smth_all = xr.concat([smth, smth_new.isel(time=slice(0, 12))], dim='time') if i == 0: smth = smth_all[1:] @@ -153,39 +155,40 @@ def nharm(x): # resample to Daily data. Note that this goes from Jan 1-Dec 1 in NH resamp = smth_new.resample(time='1D').interpolate('linear') - if (hemi == 'NH'): - #find where the resampled, smooth line crosses <5 m/s in boreal spring - #then confirm that it doesn't return to >5 m/s within 60 days - #If it does, then use next crossing below 5 m/s + if hemi == 'NH': + # find where the resampled, smooth line crosses <5 m/s in boreal spring + # then confirm that it doesn't return to >5 m/s within 60 days + # If it does, then use next crossing below 5 m/s thresh = 5 - if resamp.where(resampthresh,drop=True).isel(time=0).time + time2 = resamp_new.where(resamp_new > thresh, drop=True).isel(time=0).time if (time2.dt.dayofyear - time1.dt.dayofyear) < 60: - resamp_new = resamp.sel(time=slice(time2.time,resamp.isel(time=-1).time)) - time3 = resamp_new.where(resamp_new= lat_end: - lats = np.linspace(90,-90,num=73) - if lat_end > lat_first: - lats = np.linspace(-90,90,num=73) - - # Check to see if the longitudes are organized -180/180 or 0 to 360 - lon_first = ds[lonname].values[0] - print (lon_first, 'lon_first') - - if lon_first < 0: - lons = np.linspace(-180,177.5,num=144) - if lon_first >= 0: - lons = np.linspace(0,357.5,num=144) - - ds_out = xr.Dataset({'lat': (['lat'], lats),'lon': (['lon'], lons),}) - regridder = xe.Regridder(ds, ds_out, 'bilinear') - regridded = regridder(ds) - print (regridded, 'regridded') - - return regridded - + +def field_regridding(ds, latname, lonname): + r""" Regrid input data so that there are 73 latitude points. This grid + includes 5S and 5N, which are needed for the QBO analysis. This grid + includes 60N, which is needed for the SSW analysis. """ + + # Check to see if the latitudes are organized north to south or south to north + lat_first = ds[latname].values[0] + lat_end = ds[latname].values[-1] + + if lat_first >= lat_end: + lats = np.linspace(90, -90, num=73) + if lat_end > lat_first: + lats = np.linspace(-90, 90, num=73) + + # Check to see if the longitudes are organized -180/180 or 0 to 360 + lon_first = ds[lonname].values[0] + print(lon_first, 'lon_first') + + if lon_first < 0: + lons = np.linspace(-180, 177.5, num=144) + if lon_first >= 0: + lons = np.linspace(0, 357.5, num=144) + + ds_out = xr.Dataset({'lat': (['lat'], lats), 'lon': (['lon'], lons), }) + regridder = xe.Regridder(ds, ds_out, 'bilinear') + regridded = regridder(ds) + print(regridded, 'regridded') + + return regridded + + def compute_total_eddy_heat_flux(varray, tarray, vname, tname): + r""" Compute the total (all zonal wavenumbers) eddy heat flux + using monthly data. Output field has new variable, 'ehf.' """ + + # Take the zonal means of v and T + dummy = varray.mean('lon') + + eddyv = (varray - varray.mean('lon'))[vname] + eddyt = (tarray - tarray.mean('lon'))[tname] + + ehf = np.nanmean(eddyv.values * eddyt.values, axis=-1) + dummy[vname].values[:] = ehf + dummy = dummy.rename({vname: 'ehf'}) + print(dummy) + + return dummy + - r""" Compute the total (all zonal wavenumbers) eddy heat flux - using monthly data. 
Output field has new variable, 'ehf.' """ - - # Take the zonal means of v and T - dummy = varray.mean('lon') - - eddyv = (varray - varray.mean('lon'))[vname] - eddyt = (tarray - tarray.mean('lon'))[tname] - - ehf = np.nanmean(eddyv.values * eddyt.values,axis=-1) - dummy[vname].values[:] = ehf - dummy = dummy.rename({vname:'ehf'}) - print (dummy) - - return dummy - # Load the observational data # sfi = '/Volumes/Personal-Folders/CCP-Dillon/ERA5/stationary/POD/HadISST_sst.nc' @@ -98,17 +100,17 @@ def compute_total_eddy_heat_flux(varray, tarray, vname, tname): # Regrid # -sst_regridded = field_regridding(sst_ds,'latitude','longitude') -psl_regridded = field_regridding(psl_ds,'lat','lon') -uwnd_regridded = field_regridding(uwnd_ds,'lat','lon') -vwnd_regridded = field_regridding(vwnd_ds,'lat','lon') -air_regridded = field_regridding(air_ds,'lat','lon') +sst_regridded = field_regridding(sst_ds, 'latitude', 'longitude') +psl_regridded = field_regridding(psl_ds, 'lat', 'lon') +uwnd_regridded = field_regridding(uwnd_ds, 'lat', 'lon') +vwnd_regridded = field_regridding(vwnd_ds, 'lat', 'lon') +air_regridded = field_regridding(air_ds, 'lat', 'lon') # By the end of this block of code, the vwnd, air, and hgt variables # should each contain all available months of meridional wind, # air temperature and geopotential height, respectively. They can be # lazily loaded with xarray (e.g., after using open_mfdataset) -### END: READ INPUT FIELDS ### +# END: READ INPUT FIELDS ### r""" Compute the total (all zonal wavenumbers) eddy heat flux using monthly data. Output field has new variable, 'ehf.' """ @@ -119,38 +121,37 @@ def compute_total_eddy_heat_flux(varray, tarray, vname, tname): eddyv = (vwnd_regridded - vwnd_regridded.mean('lon'))['vwnd'] eddyt = (air_regridded - air_regridded.mean('lon'))['air'] -ehf = np.nanmean(eddyv.values * eddyt.values,axis=-1) +ehf = np.nanmean(eddyv.values * eddyt.values, axis=-1) dummy['vwnd'].values[:] = ehf -ehf = dummy.rename({'vwnd':'ehf'}) +ehf = dummy.rename({'vwnd': 'ehf'}) ehf.attrs['long_name'] = "Zonal Mean Eddy Heat Flux (v'T')" r""" Zonally average the zonal wind """ uzm = uwnd_regridded.mean('lon') -uzm = uzm.rename({'uwnd':'ua'}) +uzm = uzm.rename({'uwnd': 'ua'}) r""" Change name in psl file """ -psl_out = psl_regridded.rename({'prmsl':'psl'}) - -print ('######### BREAK ##########') - -print (sst_regridded) -print ('sst_regridded') -print (' ') -print (ehf) -print ('ehf') -print (' ') -print (uzm) -print ('uzm') -print (' ') -print (psl_ds) -print ('psl_ds') -print (' ') - +psl_out = psl_regridded.rename({'prmsl': 'psl'}) + +print('######### BREAK ##########') + +print(sst_regridded) +print('sst_regridded') +print(' ') +print(ehf) +print('ehf') +print(' ') +print(uzm) +print('uzm') +print(' ') +print(psl_ds) +print('psl_ds') +print(' ') # Merge DataArrays into output dataset out_ds = xr.merge([ehf, uzm, psl_out]) -print (out_ds) -out_ds = out_ds.rename({'level':'lev'}) +print(out_ds) +out_ds = out_ds.rename({'level': 'lev'}) out_ds.attrs['reanalysis'] = 'ERA5' out_ds.attrs['notes'] = 'Fields derived from monthly-mean ERA5 data on pressure levels' @@ -161,7 +162,7 @@ def compute_total_eddy_heat_flux(varray, tarray, vname, tname): sst_out_ds = sst_regridded sst_out_ds.attrs['reanalysis'] = 'HadiSST' sst_out_ds.attrs['notes'] = 'Fields derived from monthly-mean HadiSST sea surface temperature' -sst_out_ds = sst_out_ds.rename({'sst':'tos'}) +sst_out_ds = sst_out_ds.rename({'sst': 'tos'}) ''' # To reduce size of output file without changing results much, will thin the 
diff --git a/diagnostics/stc_qbo_enso/stc_qbo_enso.html b/diagnostics/stc_qbo_enso/stc_qbo_enso.html index c852bab5d..384e69ecc 100644 --- a/diagnostics/stc_qbo_enso/stc_qbo_enso.html +++ b/diagnostics/stc_qbo_enso/stc_qbo_enso.html @@ -39,7 +39,7 @@

STC QBO and ENSO stratospheric teleconnections
{{CASENAME}}
[Editor's note: the HTML table markup in this hunk was garbled during extraction; only the cell text survives. The recoverable structure is four sections ("ENSO teleconnections northern hemisphere", "ENSO teleconnections southern hemisphere", "QBO teleconnections northern hemisphere", "QBO teleconnections southern hemisphere"), each holding {{CASENAME}} tables that pair Model and Reanalysis plots of zonal mean zonal wind, eddy heat flux, and sea level pressure. The diff restyles the section headings (color=navy) and reflows the image rows across lines.]
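Editor's note: in the stc_qbo_enso.py diff below, qbo_metrics detects QBO phase flips as sign changes of the smoothed equatorial wind and measures full-cycle lengths. A simplified sketch of that zero-crossing bookkeeping on an idealized series; the POD concatenates the wind segments themselves, while this sketch keeps only the segment lengths:

import numpy as np

months = np.arange(360)
u = 20 * np.sin(2 * np.pi * months / 28 + 0.3)   # idealized ~28-month QBO

# indices where the wind changes sign (QBO phase flips)
crossings = np.where(np.diff(np.sign(u)))[0]

seg = np.diff(crossings)                # lengths of alternating wind regimes
n = (len(seg) // 2) * 2                 # pair easterly + westerly half-cycles
cycle_lengths = seg[:n:2] + seg[1:n:2]  # one full cycle per pair
cycle_lengths = cycle_lengths[cycle_lengths >= 14]   # drop unphysical cycles

print(cycle_lengths.min(), cycle_lengths.mean(), cycle_lengths.max())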
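Editor's note: qbo_metrics also computes a QBO Fourier amplitude after Pascoe et al. (2005): the fraction of spectral power at QBO-like periods, scaled by the standard deviation of the winds. A 1-D sketch with an assumed 14-40 month band; the POD derives the band from the measured cycle lengths and works level by level:

import numpy as np
from scipy.fft import fft, fftfreq

rng = np.random.default_rng(1)
u = 20 * np.sin(2 * np.pi * np.arange(360) / 28 + 0.3) + rng.standard_normal(360)

power = np.abs(fft(u))[: u.size // 2] ** 2           # one-sided power spectrum
with np.errstate(divide="ignore"):
    period = (1 / fftfreq(u.size))[: u.size // 2]    # period in months
band = (period > 14) & (period < 40)                 # assumed QBO-like band

# Pascoe-style amplitude: band power fraction times the wind's std dev
fa = power[band].sum() / power[1:].sum() * np.std(u)
print(round(fa, 2))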
- diff --git a/diagnostics/stc_qbo_enso/stc_qbo_enso.py b/diagnostics/stc_qbo_enso/stc_qbo_enso.py index 0b9ef72ed..02672610c 100644 --- a/diagnostics/stc_qbo_enso/stc_qbo_enso.py +++ b/diagnostics/stc_qbo_enso/stc_qbo_enso.py @@ -134,7 +134,7 @@ import matplotlib.pyplot as plt import cartopy.crs as ccrs -from scipy.fft import fft,fftfreq +from scipy.fft import fft, fftfreq from scipy import interpolate from stc_qbo_enso_plottingcodeqbo import qbo_uzm @@ -148,389 +148,391 @@ mpl.rcParams['font.family'] = 'sans-serif' mpl.rcParams['font.sans-serif'] = 'Roboto' mpl.rcParams['font.size'] = 12 -mpl.rcParams['hatch.color']='gray' - -def qbo_metrics(ds,QBOisobar): - - r""" Calculates Quasi-Biennial Oscillation (QBO) metrics for the an input - zonal wind dataset (dimensions = time x level x latitude x longitude) - - Parameters - ---------- - ds : `xarray.DataArray` or `xarray.Dataset` - The input DataArray or Dataset for which to calculate QBO diagnostics for - - Returns - ------- - period_min: scalar - The minimum number of months comprising a full QBO cycle (a period of easterlies [EQBO] - and westerlies [WQBO]) - - period_mean: scalar - The average QBO cycle (EQBO + WQBO) duration in months - - period_max: scalar - The maximum QBO cycle (EQBO + WQBO) duration in months - - easterly_amp: scalar - The average easterly amplitude, arrived at by averaging together the minimum latitudinally - averaged 5S-5N 10 hPa monthly zonal wind from each QBO cycle - - westerly_amp: scalar - The average westerly amplitude, arrived at by averaging together the minimum latitudinally - averaged 5S-5N 10 hPa monthly zonal wind from each QBO cycle - - qbo_amp: scalar - The total QBO amplitude, which is estimated by adding half of the mean easterly amplitude - (easterly_amp) to half of the mean westerly amplitude (westerly_amp) - - lowest_lev: scalar - The lowermost pressure level at which the the latitudinally averaged 5S-5N QBO Fourier - amplitude is equal to 10% of its maximum value - - latitudinal_extent: scalar - The full width at half maximum of a Gaussian fit to the 10 hPa portion of the QBO - Fourier amplitude pressure-latitude cross section - - Notes - ----- - The input xarray variable ds is assumed to have a dimension named "time x lev x lat x lon." - E.g., if your data differs in dimension name, e.g., "latitude", use the rename method: - ds.rename({'latitude':'lat'}) - ds.rename({'level':'lev'}) - - period_min is required to be >= 14 months. This requirement is used because period_min and - period_max are the time bounds that determine which frequencies are most "QBO-like." This - step is needed, because say e.g., period_min was allowed to be less than 14 months and ended - up being equal to 12 months. Then the annual cycle zonal wind variability would be deemed - "QBO-like" and annual cycle variability would be incorporated into the QBO Fourier amplitude - calculations, rendering the Fourier amplitude and all of the QBO spatial metrics useless. 
- """ - - print ("Running the QBO metrics function 'qbo_metrics'") - - if ds.lev.values[-1] > ds.lev.values[0]: - ds = ds.reindex(lev=list(reversed(ds.lev))) - - uwnd = ds.ua.values - - # Subset for 10 hPa # - subset = ds.sel(lev=QBOisobar) - - # Select 5S-5N winds # - tropical_subset = subset.isel(lat = np.logical_and(subset.lat >= -5, subset.lat <= 5)) - - # Latitudinally weight and averaged betwteen 5S-5N - qbo = tropical_subset.mean('lat') - weights = np.cos(np.deg2rad(tropical_subset.lat.values)) - interim = np.multiply(tropical_subset.ua.values,weights[np.newaxis,:]) - interim = np.nansum(interim,axis=1) - interim = np.true_divide(interim,np.sum(weights)) - qbo.ua.values[:] = interim[:] - - # Smooth with five month running mean # - qbo = qbo.rolling(time=5, center=True).mean() - - # Identify the indices corresponding to QBO phase changes (e.g., + zonal wind (westerly) -> - zonal wind (easterly)) - zero_crossings = np.where(np.diff(np.sign(qbo.ua.values)))[0] - - # Using the phase change indices, identify QBO cycles: a set of easterlies and westerlies. After doing this, # - # the minimum/maximum/average QBO cycle duration will be retrieved. The first phase change index from # - # zero_crossings is excluded in the event that the first QBO cycle is close to making a phase change, which would bias # - # the QBO duration statistics low. # - store = [] - cycles = [] - periods = [] - - for i,v in enumerate(zero_crossings): - - # Append pairs of easterly/westerly winds to "store" # - if i != 0: - tmp = qbo.ua.values[zero_crossings[i-1]+1:zero_crossings[i]+1] - store.append(tmp) - - # Retrieve the most recent QBO cycle (easterlies + westerlies) from store. Loop looks for even number indices. # - # E.g., if i == 2, one set of QBO easterlies and westerlies has been appended to store already. # - if i != 0 and i % 2 == 0: - concat = np.concatenate((store[-2],store[-1])) - - # Inserting requirement that each cycle must be at least 14 months. No observed QBO cycle has progressed # - # this quickly, but cycles do in the models. This requirement is essential because the minimum and maximum QBO cycle - # durations are used to calculate the QBO Fourier amplitude. A minimum QBO cycle duration of, say, 12 months # - # would lead to the QBO Fourier amplitude overlapping with the annual cycle Fourier amplitude. - if len(concat) >= 14: - cycles.append(concat) - periods.append(len(concat)) - - # Retrieve the minimum/maximum/average QBO cycle duration # - period_min = np.round(np.nanmin(periods),1) - period_max = np.round(np.nanmax(periods),1) - period_mean = np.round(np.nanmean(periods),1) - - print (period_min, "minimum period (months)") - print (period_mean, "mean period (months)") - print (period_max, "maximum period (months)") - - # Retrieve the minimum/maximum zonal wind from each QBO cycle. Averaging the minima (maxima) gives us the # - # easterly (westerly) amplitute # - - easterly_amp = np.round(np.nanmean([np.nanmin(v) for v in cycles]),1) - westerly_amp = np.round(np.nanmean([np.nanmax(v) for v in cycles]),1) - - print (easterly_amp, 'easterly amplitude') - print (westerly_amp, 'westerly amplitude') - - # Define the 10 hPa amplitude as in Richter et al. (2020) - qbo_amp = np.abs(easterly_amp/2) + np.abs(westerly_amp/2) - qbo_amp = np.round(qbo_amp,1) - print (qbo_amp, '10 hPa amplitude') - - ################################################################################################################# - # Retrieve the QBO Fourier amplitude, defined as in Pascoe et al. 
(2005) as the ratio of the QBO power spectrum # - # to the power spectrum of the entire zonal wind dataset, multiplied by a metric of the zonal wind variability, # - # the standard deviation of the zonal wind dataset. # - ################################################################################################################# - - # Standard deviation across entire zonal wind dataset # - std = np.nanstd(uwnd,axis=0) - - # Define Fourier frequencies comprising data and filter for frequencies between minimum/maximum QBO cycle duration # - freq = 1/fftfreq(len(uwnd)) - arr = np.where((freq > period_min) & (freq < period_max))[0] - - # FFT of entire zonal wind dataset. Square and sum Fourier coefficients. np.abs applies unneeded square root, hence # - # np.power to power 2 is used to undo this # - y = fft(uwnd,axis=0) - amplitudes = np.power(np.abs(y)[:len(y)//2],2) - - # Calculate ratio of QBO power spectrum to full zonal wind power spectrum # - quotients = [] - for i,v in enumerate(ds.lev.values): - qbodata = np.nansum(amplitudes[arr,i],axis=0) - alldata = np.nansum(amplitudes[1:,i],axis=0) - quot = np.true_divide(qbodata,alldata) - quotients.append(quot) - filtered = np.array(quotients) - - # Ratio of the QBO power spectrum to the power # - # spectrum of the entire model dataset (units:%) # - - vmin = 0 - vmax = 100 - vlevs = np.linspace(vmin,vmax,num=21) - - from palettable.colorbrewer.diverging import RdBu_11 - x2,y2 = np.meshgrid(ds.lat.values,ds.lev.values) - - plt.title('Ratio of QBO power spectrum\n to zonal wind spectrum (%)') - plt.contourf(x2,y2,filtered*100,vmin=vmin,vmax=vmax,levels=vlevs,cmap='coolwarm') - plt.semilogy() - plt.gca().invert_yaxis() - if np.nanmax(ds.lev.values) > 2000: - plt.ylabel('Pressure (Pa)') - if np.nanmax(ds.lev.values) < 2000: - plt.ylabel('Pressure (hPa)') - plt.xlabel('Latitude') - plt.colorbar() - - # Retrieve the Fourier amplitude by multiplying aforementioned ratio by standard deviation of zonal wind # - fa = np.multiply(filtered,std) - - ################################################################################################################# - # Do the Fourier amplitude calculations between 5S and 5N to retrieve spatial metrics (e.g., latitudinal width) # - # of the QBO # - ################################################################################################################# - - # hmax fixed at 10 hPa per Richter et al. (2020, JGR) # - hmax = np.where(ds.lev.values == qbo.lev.values)[0] - - # Retreive the indices of lats between 5S and 5N # - lat_hits = [i for i,v in enumerate(ds.lat.values) if v >= -5 and v <=5] - - # Retrieve the Fourier amplitude profile averaged latitudinally (w/weighting) between 5S and 5N # - weights = np.cos(np.deg2rad(ds.lat.values[lat_hits])) - interim = np.multiply(fa[:,lat_hits],weights[np.newaxis,:]) - interim2 = np.nansum(interim,axis=1) - height_profile = np.true_divide(interim2,np.sum(weights)) - - # Retrieve half the max Fourier amplitude and 10% of the max Fourier amplitude # - half_max = np.max(height_profile)/2 - qbo_base = np.max(height_profile)*0.1 - - # Interpolate the equatorial Fourier amplitude profile to have 10000 vertical grid points, enough # - # points so that each isobar can be selected to a one tenth of a hPa accuracy # - f = interpolate.interp1d(ds.lev.values,height_profile) - xnew = np.linspace(ds.lev.values[0],ds.lev.values[-1],num=10000) - ynew = f(xnew) - - # Of the 20,000 vertical grid points, find the one corresponding to hmax. 
# - hmax_idx = (np.abs(xnew - ds.lev.values[hmax])).argmin() - - # The lower and upper portions of the fourier amplitude tropical wind height profile, # - # which has been interpolated to 10000 grid points. # - lower_portion = ynew[:hmax_idx] - upper_portion = ynew[hmax_idx:] - - # Same as above, but these are lists of the isobars corresponding to the above fourier amplitudes. # - lower_portion_isobar = xnew[:hmax_idx] - upper_portion_isobar = xnew[hmax_idx:] - - # Retrieve the indices in the upper/lower portions corresponding to half the fourier max. # - lower_vertical_extent = (np.abs(lower_portion - half_max)).argmin() - upper_vertical_extent = (np.abs(upper_portion - half_max)).argmin() - - # Find the upper/lower portion isboars corresponding to the half fourier max values identified above. # - bottom = lower_portion_isobar[lower_vertical_extent] - top = upper_portion_isobar[upper_vertical_extent] - - # Convert the isobars into altitudes in meters. # - sfc = 1000 # hPa - bottomz = np.log(bottom/sfc)*-7000 - topz = np.log(top/sfc)*-7000 - - # Obtain the vertical extent by differencing the bottomz and topz. # - vertical_extent = (topz - bottomz)/1000 - vertical_extent = np.round(vertical_extent,1) - print (vertical_extent, "vertical_extent") - - # Retrieve the lowest lev the QBO extends to using 10% of the maximum Fourier amplitude # - # "lower" for CMIP6 datasets and "upper" for ERA5 reanalysis - lowest_lev = (lower_portion_isobar[(np.abs(lower_portion - qbo_base)).argmin()]) - lowest_lev = np.round(lowest_lev,1) - print (lowest_lev, "lowest_lev") - - ############################################################################################## - ############################################################################################## - ############################################################################################## - - # Retrieve the latitudinal extent # - - # https://www.geeksforgeeks.org/python-gaussian-fit/ - xdata = ds.lat.values - ydata = fa[hmax][0] - ydata[0] = 0 - ydata[-1] = 0 - - # Recast xdata and ydata into numpy arrays so we can use their handy features - xdata = np.array(xdata) - ydata = np.array(ydata) - - from scipy.optimize import curve_fit - - def gauss(x, H, A, x0, sigma): - return H + A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) - - def gauss_fit(x, y): - mean = sum(x * y) / sum(y) - sigma = np.sqrt(sum(y * (x - mean) ** 2) / sum(y)) - popt, pcov = curve_fit(gauss, x, y, p0=[min(y), max(y), mean, sigma]) - return popt - - out = gauss(xdata, *gauss_fit(xdata, ydata)) - - f = interpolate.interp1d(ds.lat.values,out) - xnew = np.linspace(ds.lat.values[0],ds.lat.values[-1],num=10000) - ynew = f(xnew) - - lower_portion = ynew[:5000] - upper_portion = ynew[5000:] - - lower_portion_lat = xnew[:5000] - upper_portion_lat = xnew[5000:] - - lat1 = lower_portion_lat[(np.abs(lower_portion - (np.max(out)/2))).argmin()] - lat2 = upper_portion_lat[(np.abs(upper_portion - (np.max(out)/2))).argmin()] - - latitudinal_extent = np.abs(lat1) + np.abs(lat2) - latitudinal_extent = np.round(latitudinal_extent,1) - - print (latitudinal_extent, "latitudinal_extent") - - - if period_min != period_max: - print ('Based on period statistics, dataset is likely to have a QBO') - qbo_switch = 1 - else: - print ('Persistent stratospheric easterlies detected - dataset likely does not have QBO') - qbo_switch = 0 - - metrics = ['minimum period: %s (months)' % period_min, - 'mean period: %s (months)' % period_mean, - 'maximum period: %s (months)' % period_max, - 'easterly amplitude: %s 
amplitude: %s (m/s)' % easterly_amp,
-           'westerly amplitude: %s (m/s)' % westerly_amp,
-           'QBO amplitude: %s (m/s)' % qbo_amp,
-           'lowest QBO level: %s (hPa)' % lowest_lev,
-           'vertical extent: %s (kilometers)' % vertical_extent,
-           'latitudinal extent of QBO: %s (degrees)' % latitudinal_extent]
-
-    return metrics, qbo_switch
-
-
+mpl.rcParams['hatch.color'] = 'gray'
+
+
+def qbo_metrics(ds, QBOisobar):
+    r""" Calculates Quasi-Biennial Oscillation (QBO) metrics for the input
+    zonal wind dataset (dimensions = time x level x latitude x longitude)
+
+    Parameters
+    ----------
+    ds : `xarray.DataArray` or `xarray.Dataset`
+        The input DataArray or Dataset for which to calculate QBO diagnostics
+
+    Returns
+    -------
+    period_min: scalar
+        The minimum number of months comprising a full QBO cycle (a period of easterlies [EQBO]
+        and westerlies [WQBO])
+
+    period_mean: scalar
+        The average QBO cycle (EQBO + WQBO) duration in months
+
+    period_max: scalar
+        The maximum QBO cycle (EQBO + WQBO) duration in months
+
+    easterly_amp: scalar
+        The average easterly amplitude, arrived at by averaging together the minimum latitudinally
+        averaged 5S-5N 10 hPa monthly zonal wind from each QBO cycle
+
+    westerly_amp: scalar
+        The average westerly amplitude, arrived at by averaging together the maximum latitudinally
+        averaged 5S-5N 10 hPa monthly zonal wind from each QBO cycle
+
+    qbo_amp: scalar
+        The total QBO amplitude, which is estimated by adding half of the mean easterly amplitude
+        (easterly_amp) to half of the mean westerly amplitude (westerly_amp)
+
+    lowest_lev: scalar
+        The lowermost pressure level at which the latitudinally averaged 5S-5N QBO Fourier
+        amplitude is equal to 10% of its maximum value
+
+    latitudinal_extent: scalar
+        The full width at half maximum of a Gaussian fit to the 10 hPa portion of the QBO
+        Fourier amplitude pressure-latitude cross section
+
+    Notes
+    -----
+    The input xarray variable ds is assumed to have dimensions named "time", "lev", "lat", and "lon".
+    E.g., if your data differs in dimension name, e.g., "latitude", use the rename method:
+        ds.rename({'latitude':'lat'})
+        ds.rename({'level':'lev'})
+
+    period_min is required to be >= 14 months, because period_min and period_max are the time
+    bounds that determine which frequencies are most "QBO-like." If, say, period_min were allowed
+    to be less than 14 months and ended up equal to 12 months, the annual cycle zonal wind
+    variability would be deemed "QBO-like" and would be incorporated into the QBO Fourier
+    amplitude calculations, rendering the Fourier amplitude and all of the QBO spatial metrics
+    useless.
+ """ + + print("Running the QBO metrics function 'qbo_metrics'") + + if ds.lev.values[-1] > ds.lev.values[0]: + ds = ds.reindex(lev=list(reversed(ds.lev))) + + uwnd = ds.ua.values + + # Subset for 10 hPa # + subset = ds.sel(lev=QBOisobar) + + # Select 5S-5N winds # + tropical_subset = subset.isel(lat=np.logical_and(subset.lat >= -5, subset.lat <= 5)) + + # Latitudinally weight and averaged betwteen 5S-5N + qbo = tropical_subset.mean('lat') + weights = np.cos(np.deg2rad(tropical_subset.lat.values)) + interim = np.multiply(tropical_subset.ua.values, weights[np.newaxis, :]) + interim = np.nansum(interim, axis=1) + interim = np.true_divide(interim, np.sum(weights)) + qbo.ua.values[:] = interim[:] + + # Smooth with five month running mean # + qbo = qbo.rolling(time=5, center=True).mean() + + # Identify the indices corresponding to QBO phase changes (e.g., + zonal wind (westerly) -> - zonal wind (easterly)) + zero_crossings = np.where(np.diff(np.sign(qbo.ua.values)))[0] + + # Using the phase change indices, identify QBO cycles: a set of easterlies and westerlies. After doing this, # + # the minimum/maximum/average QBO cycle duration will be retrieved. The first phase change index from # + # zero_crossings is excluded in the event that the first QBO cycle is close to making a phase change, which + # would bias # + # the QBO duration statistics low. # + store = [] + cycles = [] + periods = [] + + for i, v in enumerate(zero_crossings): + + # Append pairs of easterly/westerly winds to "store" # + if i != 0: + tmp = qbo.ua.values[zero_crossings[i - 1] + 1:zero_crossings[i] + 1] + store.append(tmp) + + # Retrieve the most recent QBO cycle (easterlies + westerlies) from store. Loop looks for even number indices. # + # E.g., if i == 2, one set of QBO easterlies and westerlies has been appended to store already. # + if i != 0 and i % 2 == 0: + concat = np.concatenate((store[-2], store[-1])) + + # Inserting requirement that each cycle must be at least 14 months. No observed QBO cycle has progressed # + # this quickly, but cycles do in the models. This requirement is essential because the minimum and maximum + # QBO cycle + # durations are used to calculate the QBO Fourier amplitude. A minimum QBO cycle duration of, say, + # 12 months would lead to the QBO Fourier amplitude overlapping with the annual cycle Fourier amplitude. + if len(concat) >= 14: + cycles.append(concat) + periods.append(len(concat)) + + # Retrieve the minimum/maximum/average QBO cycle duration # + period_min = np.round(np.nanmin(periods), 1) + period_max = np.round(np.nanmax(periods), 1) + period_mean = np.round(np.nanmean(periods), 1) + + print(period_min, "minimum period (months)") + print(period_mean, "mean period (months)") + print(period_max, "maximum period (months)") + + # Retrieve the minimum/maximum zonal wind from each QBO cycle. Averaging the minima (maxima) gives us the # + # easterly (westerly) amplitude # + + easterly_amp = np.round(np.nanmean([np.nanmin(v) for v in cycles]), 1) + westerly_amp = np.round(np.nanmean([np.nanmax(v) for v in cycles]), 1) + + print(easterly_amp, 'easterly amplitude') + print(westerly_amp, 'westerly amplitude') + + # Define the 10 hPa amplitude as in Richter et al. (2020) + qbo_amp = np.abs(easterly_amp / 2) + np.abs(westerly_amp / 2) + qbo_amp = np.round(qbo_amp, 1) + print(qbo_amp, '10 hPa amplitude') + + ################################################################################################################# + # Retrieve the QBO Fourier amplitude, defined as in Pascoe et al. 
(2005) as the ratio of the QBO power spectrum #
+    # to the power spectrum of the entire zonal wind dataset, multiplied by a metric of the zonal wind variability, #
+    # the standard deviation of the zonal wind dataset. #
+    #################################################################################################################
+
+    # Standard deviation across entire zonal wind dataset #
+    std = np.nanstd(uwnd, axis=0)
+
+    # Define Fourier frequencies comprising data and filter for frequencies between minimum/maximum QBO cycle duration #
+    freq = 1 / fftfreq(len(uwnd))
+    arr = np.where((freq > period_min) & (freq < period_max))[0]
+
+    # FFT of entire zonal wind dataset. Square and sum Fourier coefficients. np.abs applies unneeded square root, hence
+    # np.power to power 2 is used to undo this #
+    y = fft(uwnd, axis=0)
+    amplitudes = np.power(np.abs(y)[:len(y) // 2], 2)
+
+    # Calculate ratio of QBO power spectrum to full zonal wind power spectrum #
+    quotients = []
+    for i, v in enumerate(ds.lev.values):
+        qbodata = np.nansum(amplitudes[arr, i], axis=0)
+        alldata = np.nansum(amplitudes[1:, i], axis=0)
+        quot = np.true_divide(qbodata, alldata)
+        quotients.append(quot)
+    filtered = np.array(quotients)
+
+    # Ratio of the QBO power spectrum to the power #
+    # spectrum of the entire model dataset (units:%) #
+
+    vmin = 0
+    vmax = 100
+    vlevs = np.linspace(vmin, vmax, num=21)
+
+    x2, y2 = np.meshgrid(ds.lat.values, ds.lev.values)
+
+    plt.title('Ratio of QBO power spectrum\n to zonal wind spectrum (%)')
+    plt.contourf(x2, y2, filtered * 100, vmin=vmin, vmax=vmax, levels=vlevs, cmap='coolwarm')
+    plt.semilogy()
+    plt.gca().invert_yaxis()
+    if np.nanmax(ds.lev.values) > 2000:
+        plt.ylabel('Pressure (Pa)')
+    if np.nanmax(ds.lev.values) < 2000:
+        plt.ylabel('Pressure (hPa)')
+    plt.xlabel('Latitude')
+    plt.colorbar()
+
+    # Retrieve the Fourier amplitude by multiplying aforementioned ratio by standard deviation of zonal wind #
+    fa = np.multiply(filtered, std)
+
+    #################################################################################################################
+    # Do the Fourier amplitude calculations between 5S and 5N to retrieve spatial metrics (e.g., latitudinal width) #
+    # of the QBO #
+    #################################################################################################################
+
+    # hmax fixed at 10 hPa per Richter et al. (2020, JGR) #
+    hmax = np.where(ds.lev.values == qbo.lev.values)[0]
+
+    # Retrieve the indices of lats between 5S and 5N #
+    lat_hits = [i for i, v in enumerate(ds.lat.values) if v >= -5 and v <= 5]
+
+    # Retrieve the Fourier amplitude profile averaged latitudinally (w/weighting) between 5S and 5N #
+    weights = np.cos(np.deg2rad(ds.lat.values[lat_hits]))
+    interim = np.multiply(fa[:, lat_hits], weights[np.newaxis, :])
+    interim2 = np.nansum(interim, axis=1)
+    height_profile = np.true_divide(interim2, np.sum(weights))
+
+    # Retrieve half the max Fourier amplitude and 10% of the max Fourier amplitude #
+    half_max = np.max(height_profile) / 2
+    qbo_base = np.max(height_profile) * 0.1
+
+    # Interpolate the equatorial Fourier amplitude profile to have 10000 vertical grid points, enough #
+    # points so that each isobar can be selected to a one tenth of a hPa accuracy #
+    f = interpolate.interp1d(ds.lev.values, height_profile)
+    xnew = np.linspace(ds.lev.values[0], ds.lev.values[-1], num=10000)
+    ynew = f(xnew)
+
+    # Of the 10,000 vertical grid points, find the one corresponding to hmax. #
+    hmax_idx = (np.abs(xnew - ds.lev.values[hmax])).argmin()
+
+    # The lower and upper portions of the fourier amplitude tropical wind height profile, #
+    # which has been interpolated to 10000 grid points. #
+    lower_portion = ynew[:hmax_idx]
+    upper_portion = ynew[hmax_idx:]
+
+    # Same as above, but these are lists of the isobars corresponding to the above fourier amplitudes. #
+    lower_portion_isobar = xnew[:hmax_idx]
+    upper_portion_isobar = xnew[hmax_idx:]
+
+    # Retrieve the indices in the upper/lower portions corresponding to half the fourier max. #
+    lower_vertical_extent = (np.abs(lower_portion - half_max)).argmin()
+    upper_vertical_extent = (np.abs(upper_portion - half_max)).argmin()
+
+    # Find the upper/lower portion isobars corresponding to the half fourier max values identified above. #
+    bottom = lower_portion_isobar[lower_vertical_extent]
+    top = upper_portion_isobar[upper_vertical_extent]
+
+    # Convert the isobars into altitudes in meters. #
+    sfc = 1000  # hPa
+    bottomz = np.log(bottom / sfc) * -7000
+    topz = np.log(top / sfc) * -7000
+
+    # Obtain the vertical extent by differencing the bottomz and topz. #
+    vertical_extent = (topz - bottomz) / 1000
+    vertical_extent = np.round(vertical_extent, 1)
+    print(vertical_extent, "vertical_extent")
+
+    # Retrieve the lowest lev the QBO extends to using 10% of the maximum Fourier amplitude #
+    # "lower" for CMIP6 datasets and "upper" for ERA5 reanalysis
+    lowest_lev = (lower_portion_isobar[(np.abs(lower_portion - qbo_base)).argmin()])
+    lowest_lev = np.round(lowest_lev, 1)
+    print(lowest_lev, "lowest_lev")
+
+    ##############################################################################################
+    ##############################################################################################
+    ##############################################################################################
+
+    # Retrieve the latitudinal extent #
+
+    # https://www.geeksforgeeks.org/python-gaussian-fit/
+    xdata = ds.lat.values
+    ydata = fa[hmax][0]
+    ydata[0] = 0
+    ydata[-1] = 0
+
+    # Recast xdata and ydata into numpy arrays so we can use their handy features
+    xdata = np.array(xdata)
+    ydata = np.array(ydata)
+
+    from scipy.optimize import curve_fit
+
+    def gauss(x, H, A, x0, sigma):
+        return H + A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
+
+    def gauss_fit(x, y):
+        mean = sum(x * y) / sum(y)
+        sigma = np.sqrt(sum(y * (x - mean) ** 2) / sum(y))
+        popt, pcov = curve_fit(gauss, x, y, p0=[min(y), max(y), mean, sigma])
+        return popt
+
+    out = gauss(xdata, *gauss_fit(xdata, ydata))
+
+    f = interpolate.interp1d(ds.lat.values, out)
+    xnew = np.linspace(ds.lat.values[0], ds.lat.values[-1], num=10000)
+    ynew = f(xnew)
+
+    lower_portion = ynew[:5000]
+    upper_portion = ynew[5000:]
+
+    lower_portion_lat = xnew[:5000]
+    upper_portion_lat = xnew[5000:]
+
+    lat1 = lower_portion_lat[(np.abs(lower_portion - (np.max(out) / 2))).argmin()]
+    lat2 = upper_portion_lat[(np.abs(upper_portion - (np.max(out) / 2))).argmin()]
+
+    latitudinal_extent = np.abs(lat1) + np.abs(lat2)
+    latitudinal_extent = np.round(latitudinal_extent, 1)
+
+    print(latitudinal_extent, "latitudinal_extent")
+
+    if period_min != period_max:
+        print('Based on period statistics, dataset is likely to have a QBO')
+        qbo_switch = 1
+    else:
+        print('Persistent stratospheric easterlies detected - dataset likely does not have QBO')
+        qbo_switch = 0
+
+    metrics = ['minimum period: %s (months)' % period_min,
+               'mean period: %s (months)' % period_mean,
+               'maximum period: %s (months)' % period_max,
+               'easterly amplitude: %s (m/s)' % easterly_amp,
+               'westerly amplitude: %s (m/s)' % westerly_amp,
+               'QBO amplitude: %s (m/s)' % qbo_amp,
+               'lowest QBO level: %s (hPa)' % lowest_lev,
+               'vertical extent: %s (kilometers)' % vertical_extent,
+               'latitudinal extent of QBO: %s (degrees)' % latitudinal_extent]
+
+    return metrics, qbo_switch
+
+
def compute_total_eddy_heat_flux(v, T):
-    r""" Compute the total zonal mean eddy heat flux from meridonal winds
-    and temperatures. The eddy heat flux is calculated as:
-    ehf = zonal_mean( (v - zonal_mean(v)) * (T - zonal_mean(T)))
-
-    Parameters
-    ----------
-    v : `xarray.DataArray`
-        The meridional wind component. Assumed to have the same dimensions as T
-
-    T : `xarray.DataArray`
-        The air temperature. Assumed to have the same dimensions as v
-
-    Returns
-    -------
-    ehf : `xarray.DataArray`
-        The zonal mean eddy heat flux
-
-    Notes
-    -----
-    The input fields v and T are assumed to have dimensions named "lat"
-    and "lon". E.g., if your data has dimensions "latitude" and/or "longitude",
-    use the rename method:
-    ds.rename({'latitude':'lat','longitude':'lon'})
-
-    Ideally v and T would be provided on the same latitude/longitude grid.
-    In practice this is not necessarily the case as some models provide
-    different variables at cell-centers and cell-edges. If this is found
-    to be the case, this function will use xesmf to do bilinear regridding
-    of the meridional wind component to match the grid of the temperatures.
-
-    """
-
-    # Take the zonal means of v and T
-    v_zm = v.mean('lon')
-    T_zm = T.mean('lon')
-
-    # If v and T are on same grid, can multiply the eddies and take zonal mean
-    if (np.array_equal(v.lat,T.lat)) and (np.array_equal(v.lon, T.lon)):
-        ehf = ((v - v_zm) * (T - T_zm)).mean('lon')
-
-    # If v and T are on different grids, interpolate v to T's grid beforehand
-    else:
-        # Set up xESMF regridder with necessary grid-defining datasets
-        print('*** Interpolating v to same grid as T')
-        in_grid = xr.Dataset(
-            {
-                "lat": (["lat"], v.lat.values),
-                "lon": (["lon"], v.lon.values),
-            }
-        )
-
-        out_grid = xr.Dataset(
-            {
-                "lat": (["lat"], T.lat.values),
-                "lon": (["lon"], T.lon.values),
-            }
-        )
-        regridder = xe.Regridder(in_grid, out_grid, "bilinear")
-
-        ehf = (regridder(v - v_zm)*(T - T_zm)).mean('lon')
-
-    return ehf
-
+    r""" Compute the total zonal mean eddy heat flux from meridional winds
+    and temperatures. The eddy heat flux is calculated as:
+    ehf = zonal_mean( (v - zonal_mean(v)) * (T - zonal_mean(T)))
+
+    Parameters
+    ----------
+    v : `xarray.DataArray`
+        The meridional wind component. Assumed to have the same dimensions as T
+
+    T : `xarray.DataArray`
+        The air temperature. Assumed to have the same dimensions as v
+
+    Returns
+    -------
+    ehf : `xarray.DataArray`
+        The zonal mean eddy heat flux
+
+    Notes
+    -----
+    The input fields v and T are assumed to have dimensions named "lat"
+    and "lon". E.g., if your data has dimensions "latitude" and/or "longitude",
+    use the rename method:
+    ds.rename({'latitude':'lat','longitude':'lon'})
+
+    Ideally v and T would be provided on the same latitude/longitude grid.
+    In practice this is not necessarily the case as some models provide
+    different variables at cell-centers and cell-edges. If this is found
+    to be the case, this function will use xesmf to do bilinear regridding
+    of the meridional wind component to match the grid of the temperatures.
+ + """ + + # Take the zonal means of v and T + + v_zm = v.mean('lon') + T_zm = T.mean('lon') + + # If v and T are on same grid, can multiply the eddies and take zonal mean + if (np.array_equal(v.lat, T.lat)) and (np.array_equal(v.lon, T.lon)): + ehf = ((v - v_zm) * (T - T_zm)).mean('lon') + + # If v and T are on different grids, interpolate v to T's grid beforehand + else: + # Set up xESMF regridder with necessary grid-defining datasets + print('*** Interpolating v to same grid as T') + in_grid = xr.Dataset( + { + "lat": (["lat"], v.lat.values), + "lon": (["lon"], v.lon.values), + } + ) + + out_grid = xr.Dataset( + { + "lat": (["lat"], T.lat.values), + "lon": (["lon"], T.lon.values), + } + ) + regridder = xe.Regridder(in_grid, out_grid, "bilinear") + + ehf = (regridder(v - v_zm) * (T - T_zm)).mean('lon') + + return ehf + + ################################################################################################## ################################################################################################## ################################################################################################## @@ -545,45 +547,45 @@ def compute_total_eddy_heat_flux(v, T): # Parse MDTF-set environment variables print('*** Parse MDTF-set environment variables ...') CASENAME = os.environ['CASENAME'] -FIRSTYR = os.environ['FIRSTYR'] -LASTYR = os.environ['LASTYR'] -WK_DIR = os.environ['WK_DIR'] +FIRSTYR = os.environ['startdate'] +LASTYR = os.environ['enddate'] +WK_DIR = os.environ['WORK_DIR'] OBS_DATA = os.environ['OBS_DATA'] QBOisobar = os.environ['QBOisobar'] ########################################################################### -################################ observations ############################# +# ############################### observations ############################ ########################################################################### print(f'*** Now working on obs data\n------------------------------') -obs_file_atm = OBS_DATA+'/stc-qbo-enso-obs-atm.nc' -obs_file_ocn = OBS_DATA+'/stc-qbo-enso-obs-ocn.nc' +obs_file_atm = OBS_DATA + '/stc-qbo-enso-obs-atm.nc' +obs_file_ocn = OBS_DATA + '/stc-qbo-enso-obs-ocn.nc' print(f'*** Reading obs data from {obs_file_atm}') obs_atm = xr.open_dataset(obs_file_atm) -print (obs_atm, 'obs_atm') +print(obs_atm, 'obs_atm') print(f'*** Reading obs data from {obs_file_ocn}') obs_sst = xr.open_dataset(obs_file_ocn) -print (obs_sst, 'obs_sst') +print(obs_sst, 'obs_sst') # Subset the data for the user defined first and last years # -obs_atm = obs_atm.sel(time=slice(str(FIRSTYR),str(LASTYR))) -obs_sst = obs_sst.sel(time=slice(str(FIRSTYR),str(LASTYR))) +obs_atm = obs_atm.sel(time=slice(str(FIRSTYR), str(LASTYR))) +obs_sst = obs_sst.sel(time=slice(str(FIRSTYR), str(LASTYR))) # Create the POD figures directory plot_dir = f'{WK_DIR}/obs/' ################################################ -print ('*** Running the observed ENSO indexing') +print('*** Running the observed ENSO indexing') ################################################ # Extract the tropical domain # -ENSO = obs_sst.isel(lat = np.logical_and(obs_sst.lat >= -5, obs_sst.lat <= 5)) +ENSO = obs_sst.isel(lat=np.logical_and(obs_sst.lat >= -5, obs_sst.lat <= 5)) # Extract date and longitude info from ENSO dataset # date_first = obs_sst.time[0] @@ -592,43 +594,45 @@ def compute_total_eddy_heat_flux(v, T): # Identify the correct ENSO longitudinal grid # if lon_first < 0: - ENSO = ENSO.sel(lon=slice(-170,-120)) + ENSO = ENSO.sel(lon=slice(-170, -120)) else: - ENSO = 
ENSO.sel(lon=slice(190,240)) + ENSO = ENSO.sel(lon=slice(190, 240)) # Latitudinally average the ENSO data # weighted_mean = ENSO.mean('lat') weights = np.cos(np.deg2rad(ENSO.lat.values)) -interim = np.multiply(ENSO.tos.values,weights[np.newaxis,:,np.newaxis]) -interim = np.nansum(interim,axis=1) -interim = np.true_divide(interim,np.sum(weights)) +interim = np.multiply(ENSO.tos.values, weights[np.newaxis, :, np.newaxis]) +interim = np.nansum(interim, axis=1) +interim = np.true_divide(interim, np.sum(weights)) weighted_mean.tos.values[:] = interim[:] + def enso_index(seasonal): + # Create 5-month seasonally averaged standardized ENSO anomalies. Weight each month by number of days comprising + # month # + day_in_month_weights = seasonal.time.dt.days_in_month.values[:5] / np.sum(seasonal.time.dt.days_in_month.values[:5]) + sstindex = np.reshape(seasonal.tos.values, (int(len(seasonal.tos.values) / 5), 5)) + sstindex = np.nanmean(np.multiply(sstindex, day_in_month_weights[np.newaxis, :]), axis=1) + anom = np.subtract(sstindex, np.nanmean(sstindex)) + anom = np.true_divide(anom, np.nanstd(sstindex)) - # Create 5-month seasonally averaged standardized ENSO anomalies. Weight each month by number of days comprising month # - day_in_month_weights = seasonal.time.dt.days_in_month.values[:5]/np.sum(seasonal.time.dt.days_in_month.values[:5]) - sstindex = np.reshape(seasonal.tos.values,(int(len(seasonal.tos.values)/5),5)) - sstindex = np.nanmean(np.multiply(sstindex,day_in_month_weights[np.newaxis,:]),axis=1) - anom = np.subtract(sstindex,np.nanmean(sstindex)) - anom = np.true_divide(anom,np.nanstd(sstindex)) + # Get the unique years from "seasonal" and then remove the last one, which is not needed + years = [v for v in set(np.sort(seasonal.time.dt.year.values))] + nina_years = [years[i] for i, v in enumerate(anom) if v <= -1] + nino_years = [years[i] for i, v in enumerate(anom) if v >= 1] + + return nina_years, nino_years - # Get the unique years from "seasonal" and then remove the last one, which is not needed - years = [v for v in set(np.sort(seasonal.time.dt.year.values))] - nina_years = [years[i] for i,v in enumerate(anom) if v <= -1] - nino_years = [years[i] for i,v in enumerate(anom) if v >= 1] - - return nina_years, nino_years # Subsample ENSO data for NH # seasonal = weighted_mean.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([11,12,1,2,3])).mean('lon') +seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([11, 12, 1, 2, 3])).mean('lon') nh_nina, nh_nino = enso_index(seasonal) seasonal.close() # Subsample ENSO data for SH # seasonal = weighted_mean.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([9,10,11,12,1])).mean('lon') +seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([9, 10, 11, 12, 1])).mean('lon') sh_nina, sh_nino = enso_index(seasonal) seasonal.close() @@ -642,19 +646,21 @@ def enso_index(seasonal): ########################################################################## nh_enso_uzm = obs_atm.ua.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -nh_enso_vtzm = obs_atm.ehf.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) +nh_enso_vtzm = obs_atm.ehf.sel( + time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) nh_enso_psl = 
obs_atm.psl.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -nh_enso_titles = ['November','December','January','February','March'] -nh_enso_plot_months = [11,12,1,2,3] -nh_enso_axes = [0,90,1000,1] +nh_enso_titles = ['November', 'December', 'January', 'February', 'March'] +nh_enso_plot_months = [11, 12, 1, 2, 3] +nh_enso_axes = [0, 90, 1000, 1] nh_enso_psl_axes = [-180, 180, 20, 90] sh_enso_uzm = obs_atm.ua.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -sh_enso_vtzm = obs_atm.ehf.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) +sh_enso_vtzm = obs_atm.ehf.sel( + time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) sh_enso_psl = obs_atm.psl.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -sh_enso_titles = ['September','October','November','December','January'] -sh_enso_plot_months = [9,10,11,12,1] -sh_enso_axes = [-90,0,1000,1] +sh_enso_titles = ['September', 'October', 'November', 'December', 'January'] +sh_enso_plot_months = [9, 10, 11, 12, 1] +sh_enso_axes = [-90, 0, 1000, 1] sh_enso_psl_axes = [-180, 180, -90, -20] uzm_dict = {} @@ -670,48 +676,50 @@ def enso_index(seasonal): psl_dict['SH'] = sh_enso_psl, sh_enso_titles, sh_enso_plot_months, ccrs.SouthPolarStereo(), sh_enso_psl_axes ############################################### -print ('*** Running the observed QBO indexing') +print('*** Running the observed QBO indexing') ############################################### -print (QBOisobar, "QBOisobar") +print(QBOisobar, "QBOisobar") # Subset atmospheric data for user defined isobar # subset = obs_atm.sel(lev=QBOisobar) # Select 5S-5N winds # -tropical_subset = subset.interp(lat=[-5,-2.5,0,2.5,5]) +tropical_subset = subset.interp(lat=[-5, -2.5, 0, 2.5, 5]) # Latitudinally weight and average # qbo = tropical_subset.mean('lat') weights = np.cos(np.deg2rad(tropical_subset.lat.values)) -interim = np.multiply(tropical_subset.ua.values,weights[np.newaxis,:]) -interim = np.nansum(interim,axis=1) -interim = np.true_divide(interim,np.sum(weights)) +interim = np.multiply(tropical_subset.ua.values, weights[np.newaxis, :]) +interim = np.nansum(interim, axis=1) +interim = np.true_divide(interim, np.sum(weights)) qbo.ua.values[:] = interim[:] + def qbo_index(seasonal): + # Create 2-month seasonally averaged standardized QBO anomalies. Weight each month by number of days comprising + # month # + day_in_month_weights = seasonal.time.dt.days_in_month.values[:2] / np.sum(seasonal.time.dt.days_in_month.values[:2]) + qboindex = np.reshape(seasonal.ua.values, (int(len(seasonal.ua.values) / 2), 2)) + qboindex = np.nanmean(np.multiply(qboindex, day_in_month_weights[np.newaxis, :]), axis=1) + anom = np.subtract(qboindex, np.nanmean(qboindex)) + anom = np.true_divide(anom, np.nanstd(qboindex)) - # Create 2-month seasonally averaged standardized QBO anomalies. 
Weight each month by number of days comprising month # - day_in_month_weights = seasonal.time.dt.days_in_month.values[:2]/np.sum(seasonal.time.dt.days_in_month.values[:2]) - qboindex = np.reshape(seasonal.ua.values,(int(len(seasonal.ua.values)/2),2)) - qboindex = np.nanmean(np.multiply(qboindex,day_in_month_weights[np.newaxis,:]),axis=1) - anom = np.subtract(qboindex,np.nanmean(qboindex)) - anom = np.true_divide(anom,np.nanstd(qboindex)) + # Get the unique years from "seasonal" and then remove the last one, which is not needed + years = [v for v in set(np.sort(seasonal.time.dt.year.values))] + eqbo_years = [years[i] for i, v in enumerate(anom) if v <= -1] + wqbo_years = [years[i] for i, v in enumerate(anom) if v >= 1] - # Get the unique years from "seasonal" and then remove the last one, which is not needed - years = [v for v in set(np.sort(seasonal.time.dt.year.values))] - eqbo_years = [years[i] for i,v in enumerate(anom) if v <= -1] - wqbo_years = [years[i] for i,v in enumerate(anom) if v >= 1] + return eqbo_years, wqbo_years - return eqbo_years, wqbo_years # Subsample QBO data for NH # -seasonal = qbo.sel(time=qbo.time.dt.month.isin([10,11])).mean('lon') +seasonal = qbo.sel(time=qbo.time.dt.month.isin([10, 11])).mean('lon') nh_eqbo, nh_wqbo = qbo_index(seasonal) seasonal.close() # Subsample QBO data for SH # -seasonal = qbo.sel(time=qbo.time.dt.month.isin([7,8])).mean('lon') +seasonal = qbo.sel(time=qbo.time.dt.month.isin([7, 8])).mean('lon') sh_eqbo, sh_wqbo = qbo_index(seasonal) seasonal.close() @@ -732,17 +740,17 @@ def qbo_index(seasonal): nh_qbo_uzm = obs_atm.ua.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) nh_qbo_vtzm = obs_atm.ehf.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) nh_qbo_psl = obs_atm.psl.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) -nh_qbo_titles = ['October','November','December','January','February'] -nh_qbo_plot_months = [10,11,12,1,2] -nh_qbo_axes = [0,90,1000,1] +nh_qbo_titles = ['October', 'November', 'December', 'January', 'February'] +nh_qbo_plot_months = [10, 11, 12, 1, 2] +nh_qbo_axes = [0, 90, 1000, 1] nh_qbo_psl_axes = [-180, 180, 20, 90] sh_qbo_uzm = obs_atm.ua.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) sh_qbo_vtzm = obs_atm.ehf.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) sh_qbo_psl = obs_atm.psl.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) -sh_qbo_titles = ['July','August','September','October','November'] -sh_qbo_plot_months = [7,8,9,10,11] -sh_qbo_axes = [-90,0,1000,1] +sh_qbo_titles = ['July', 'August', 'September', 'October', 'November'] +sh_qbo_plot_months = [7, 8, 9, 10, 11] +sh_qbo_axes = [-90, 0, 1000, 1] sh_qbo_psl_axes = [-180, 180, -90, -20] uzm_qbo_dict = {} @@ -757,80 +765,84 @@ def qbo_index(seasonal): psl_qbo_dict['NH'] = nh_qbo_psl, nh_qbo_titles, nh_qbo_plot_months, ccrs.NorthPolarStereo(), nh_qbo_psl_axes psl_qbo_dict['SH'] = sh_qbo_psl, sh_qbo_titles, sh_qbo_plot_months, ccrs.SouthPolarStereo(), sh_qbo_psl_axes -hemispheres = ['NH','SH'] +hemispheres = ['NH', 'SH'] for hemi in hemispheres: - - ############################################### - print ('*** Calling the observed ENSO indices') - ############################################### - obs_nina, obs_nino = enso_dict[hemi] - - print (obs_nina,'obs_nina') - print (obs_nino, 'obs_nino') 
- - ################################################################### - print ('*** Running the observed ENSO zonal mean zonal wind calcs') - ################################################################### - - obstos_plot = f'{plot_dir}/obs-enso34-uzm-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = enso_uzm(uzm_dict[hemi][0],obs_nina,obs_nino,uzm_dict[hemi][1],uzm_dict[hemi][2],uzm_dict[hemi][3]) - out_fig.savefig(obstos_plot,dpi=700) - - ############################################################ - print ('*** Running the observed ENSO eddy heat flux calcs') - ############################################################ - obsvt_plot = f'{plot_dir}/obs-enso34-vt-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = enso_vt(vtzm_dict[hemi][0],obs_nina,obs_nino,vtzm_dict[hemi][1],vtzm_dict[hemi][2],vtzm_dict[hemi][3]) - out_fig.savefig(obsvt_plot,dpi=700) - - ########################################################## - print ('*** Running the observed ENSO sea level pressure') - ########################################################## - obsps_plot = f'{plot_dir}/obs-enso34-psl-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = enso_slp(psl_dict[hemi][0],obs_nina,obs_nino,psl_dict[hemi][1],psl_dict[hemi][2],psl_dict[hemi][3],psl_dict[hemi][4]) - out_fig.savefig(obsps_plot,dpi=700) - - ############################################## - print ('*** Calling the observed QBO indices') - ############################################## - obs_eqbo, obs_wqbo = qbo_dict[hemi] - - print (obs_eqbo,'obs_eqbo') - print (obs_wqbo, 'obs_wqbo') - - ##################################################################### - print ('*** Running the observed QBO zonal mean zonal wind plotting') - ##################################################################### - uzm_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-uzm-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = qbo_uzm(uzm_qbo_dict[hemi][0],obs_eqbo,obs_wqbo,QBOisobar,uzm_qbo_dict[hemi][1],uzm_qbo_dict[hemi][2],uzm_qbo_dict[hemi][3]) - out_fig.savefig(uzm_plot,dpi=700) - - ######################################################################### - print ('*** Running the observed QBO zonal mean eddy heat flux plotting') - ######################################################################### - vtzm_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-vt-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = qbo_vt(vtzm_qbo_dict[hemi][0],obs_eqbo,obs_wqbo,QBOisobar,vtzm_qbo_dict[hemi][1],vtzm_qbo_dict[hemi][2],vtzm_qbo_dict[hemi][3]) - out_fig.savefig(vtzm_plot,dpi=700) - - ################################################################## - print ('*** Running the observed QBO sea level pressure plotting') - ################################################################## - psl_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-psl-{FIRSTYR}-{LASTYR}-%s.png' % hemi - out_fig, out_ax = qbo_slp(psl_qbo_dict[hemi][0],obs_eqbo,obs_wqbo,QBOisobar,psl_qbo_dict[hemi][1],psl_qbo_dict[hemi][2],psl_qbo_dict[hemi][3],psl_qbo_dict[hemi][4]) - out_fig.savefig(psl_plot,dpi=700) - + ############################################### + print('*** Calling the observed ENSO indices') + ############################################### + obs_nina, obs_nino = enso_dict[hemi] + + print(obs_nina, 'obs_nina') + print(obs_nino, 'obs_nino') + + ################################################################### + print('*** Running the observed ENSO zonal mean zonal wind calcs') + ################################################################### + + obstos_plot = 
f'{plot_dir}/obs-enso34-uzm-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = enso_uzm(uzm_dict[hemi][0], obs_nina, obs_nino, uzm_dict[hemi][1], uzm_dict[hemi][2], + uzm_dict[hemi][3]) + out_fig.savefig(obstos_plot, dpi=700) + + ############################################################ + print('*** Running the observed ENSO eddy heat flux calcs') + ############################################################ + obsvt_plot = f'{plot_dir}/obs-enso34-vt-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = enso_vt(vtzm_dict[hemi][0], obs_nina, obs_nino, vtzm_dict[hemi][1], vtzm_dict[hemi][2], + vtzm_dict[hemi][3]) + out_fig.savefig(obsvt_plot, dpi=700) + + ########################################################## + print('*** Running the observed ENSO sea level pressure') + ########################################################## + obsps_plot = f'{plot_dir}/obs-enso34-psl-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = enso_slp(psl_dict[hemi][0], obs_nina, obs_nino, psl_dict[hemi][1], psl_dict[hemi][2], + psl_dict[hemi][3], psl_dict[hemi][4]) + out_fig.savefig(obsps_plot, dpi=700) + + ############################################## + print('*** Calling the observed QBO indices') + ############################################## + obs_eqbo, obs_wqbo = qbo_dict[hemi] + + print(obs_eqbo, 'obs_eqbo') + print(obs_wqbo, 'obs_wqbo') + + ##################################################################### + print('*** Running the observed QBO zonal mean zonal wind plotting') + ##################################################################### + uzm_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-uzm-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = qbo_uzm(uzm_qbo_dict[hemi][0], obs_eqbo, obs_wqbo, QBOisobar, uzm_qbo_dict[hemi][1], + uzm_qbo_dict[hemi][2], uzm_qbo_dict[hemi][3]) + out_fig.savefig(uzm_plot, dpi=700) + + ######################################################################### + print('*** Running the observed QBO zonal mean eddy heat flux plotting') + ######################################################################### + vtzm_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-vt-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = qbo_vt(vtzm_qbo_dict[hemi][0], obs_eqbo, obs_wqbo, QBOisobar, vtzm_qbo_dict[hemi][1], + vtzm_qbo_dict[hemi][2], vtzm_qbo_dict[hemi][3]) + out_fig.savefig(vtzm_plot, dpi=700) + + ################################################################## + print('*** Running the observed QBO sea level pressure plotting') + ################################################################## + psl_plot = f'{plot_dir}/obs-qbo{QBOisobar}hPa-psl-{FIRSTYR}-{LASTYR}-%s.png' % hemi + out_fig, out_ax = qbo_slp(psl_qbo_dict[hemi][0], obs_eqbo, obs_wqbo, QBOisobar, psl_qbo_dict[hemi][1], + psl_qbo_dict[hemi][2], psl_qbo_dict[hemi][3], psl_qbo_dict[hemi][4]) + out_fig.savefig(psl_plot, dpi=700) print('*** Running the observed QBO metrics') -metricsout, switch = qbo_metrics(obs_atm,QBOisobar) +metricsout, switch = qbo_metrics(obs_atm, QBOisobar) filepath = f'{plot_dir}/obs-qbo{QBOisobar}hPa-metrics-{FIRSTYR}-{LASTYR}.txt' with open(filepath, 'w') as file_handler: - file_handler.write(f"{'QBO metrics: periodicity and spatial characteristics'}\n") - file_handler.write(f"{' '}\n") - for item in metricsout: - file_handler.write(f"{item}\n") - + file_handler.write(f"{'QBO metrics: periodicity and spatial characteristics'}\n") + file_handler.write(f"{' '}\n") + for item in metricsout: + file_handler.write(f"{item}\n") + ############################################### # Tidy 
up by closing the open xarray datasets #
###############################################

@@ -856,7 +868,7 @@ def qbo_index(seasonal):
 sh_qbo_psl.close()

###########################################################################
-################################## model ##################################
+# ################################# model #################################
###########################################################################

plot_dir = f'{WK_DIR}/model/'

@@ -880,39 +892,39 @@ def qbo_index(seasonal):
# Compute the diagnostics (note, here we assume that all model variables are the same length in time)
mod_firstyr = ua.time.dt.year.values[0]
mod_lastyr = ua.time.dt.year.values[-1]
-print(mod_firstyr,mod_lastyr)
-print (FIRSTYR,LASTYR)
+print(mod_firstyr, mod_lastyr)
+print(FIRSTYR, LASTYR)

-ps = psl.sel(time=slice(str(FIRSTYR),str(LASTYR)))
-uas = ua.sel(time=slice(str(FIRSTYR),str(LASTYR))).mean('lon')
-vas = va.sel(time=slice(str(FIRSTYR),str(LASTYR)))
-tas = ta.sel(time=slice(str(FIRSTYR),str(LASTYR)))
-toss = tos.sel(time=slice(str(FIRSTYR),str(LASTYR)))
+ps = psl.sel(time=slice(str(FIRSTYR), str(LASTYR)))
+uas = ua.sel(time=slice(str(FIRSTYR), str(LASTYR))).mean('lon')
+vas = va.sel(time=slice(str(FIRSTYR), str(LASTYR)))
+tas = ta.sel(time=slice(str(FIRSTYR), str(LASTYR)))
+toss = tos.sel(time=slice(str(FIRSTYR), str(LASTYR)))

print(f'***Determine whether model pressure levels are in Pa or hPa, convert to hPa')

-if getattr(uas.lev,'units') == 'Pa':
- print(f'**Converting pressure levels to hPa')
- uas = uas.assign_coords({"lev": (uas.lev/100.)})
- uas.lev.attrs['units'] = 'hPa'
- vas = vas.assign_coords({"lev": (vas.lev/100.)})
- vas.lev.attrs['units'] = 'hPa'
- tas = tas.assign_coords({"lev": (tas.lev/100.)})
- tas.lev.attrs['units'] = 'hPa'
-
-if getattr(ps.psl,'units') == 'Pa':
- print(f'**Converting pressure levels to hPa')
- ps.psl.attrs['units'] = 'hPa'
- ps.psl.values[:] = ps.psl.values/100.
+if getattr(uas.lev, 'units') == 'Pa':
+ print(f'**Converting pressure levels to hPa')
+ uas = uas.assign_coords({"lev": (uas.lev / 100.)})
+ uas.lev.attrs['units'] = 'hPa'
+ vas = vas.assign_coords({"lev": (vas.lev / 100.)})
+ vas.lev.attrs['units'] = 'hPa'
+ tas = tas.assign_coords({"lev": (tas.lev / 100.)})
+ tas.lev.attrs['units'] = 'hPa'
+
+if getattr(ps.psl, 'units') == 'Pa':
+ print(f'**Converting sea level pressure to hPa')
+ ps.psl.attrs['units'] = 'hPa'
+ ps.psl.values[:] = ps.psl.values / 100.
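The unit handling above is attribute-driven: the conversion only fires when the units metadata reads 'Pa'. A minimal runnable sketch of the same guard on a synthetic DataArray (all names and values here are illustrative, not part of the POD):

import numpy as np
import xarray as xr

# Synthetic field with a pressure coordinate stored in Pa
ua_demo = xr.DataArray(
    np.zeros((3, 4)),
    dims=("lev", "lat"),
    coords={"lev": ("lev", [100000.0, 50000.0, 10000.0], {"units": "Pa"}),
            "lat": [-30.0, 0.0, 30.0, 60.0]},
)

if ua_demo.lev.attrs.get("units") == "Pa":
    ua_demo = ua_demo.assign_coords(lev=ua_demo.lev / 100.0)
    ua_demo.lev.attrs["units"] = "hPa"  # coordinate arithmetic drops attrs, so restore them

print(ua_demo.lev.values)  # [1000.  500.  100.]

As in the script, the units attribute must be re-set after the division, since xarray arithmetic does not propagate coordinate attrs by default.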
# Create the POD figures directory plot_dir = f'{WK_DIR}/model/' ############################################# -print ('*** Running the model ENSO indexing') +print('*** Running the model ENSO indexing') ############################################# # Extract the tropical domain # -ENSO = toss.isel(lat = np.logical_and(toss.lat >= -5, toss.lat <= 5)) +ENSO = toss.isel(lat=np.logical_and(toss.lat >= -5, toss.lat <= 5)) # Extract date and longitude info from ENSO dataset # date_first = toss.time[0] @@ -921,27 +933,27 @@ def qbo_index(seasonal): # Identify the correct ENSO longitudinal grid # if lon_first < 0: - ENSO = ENSO.sel(lon=slice(-170,-120)) + ENSO = ENSO.sel(lon=slice(-170, -120)) else: - ENSO = ENSO.sel(lon=slice(190,240)) + ENSO = ENSO.sel(lon=slice(190, 240)) # Latitudinally average the ENSO data # weighted_mean = ENSO.mean('lat') weights = np.cos(np.deg2rad(ENSO.lat.values)) -interim = np.multiply(ENSO.tos.values,weights[np.newaxis,:,np.newaxis]) -interim = np.nansum(interim,axis=1) -interim = np.true_divide(interim,np.sum(weights)) +interim = np.multiply(ENSO.tos.values, weights[np.newaxis, :, np.newaxis]) +interim = np.nansum(interim, axis=1) +interim = np.true_divide(interim, np.sum(weights)) weighted_mean.tos.values[:] = interim[:] - + # Subsample ENSO data for NH # seasonal = weighted_mean.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([11,12,1,2,3])).mean('lon') +seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([11, 12, 1, 2, 3])).mean('lon') nh_nina, nh_nino = enso_index(seasonal) seasonal.close() # Subsample ENSO data for SH # seasonal = weighted_mean.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([9,10,11,12,1])).mean('lon') +seasonal = seasonal.sel(time=seasonal.time.dt.month.isin([9, 10, 11, 12, 1])).mean('lon') sh_nina, sh_nino = enso_index(seasonal) seasonal.close() @@ -955,24 +967,28 @@ def qbo_index(seasonal): ########################################################################## ######################################################### -print ('*** Doing the model eddy heat flux calculations') +print('*** Doing the model eddy heat flux calculations') ######################################################### -vt = compute_total_eddy_heat_flux(vas.va,tas.ta) +vt = compute_total_eddy_heat_flux(vas.va, tas.ta) -model_nh_enso_uzm = uas.ua.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) +model_nh_enso_uzm = uas.ua.sel( + time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) model_nh_enso_vtzm = vt.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -model_nh_enso_psl = ps.psl.sel(time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) -model_nh_enso_titles = ['November','December','January','February','March'] -model_nh_enso_plot_months = [11,12,1,2,3] -model_nh_enso_axes = [0,90,1000,1] +model_nh_enso_psl = ps.psl.sel( + time=slice('%s-11-01' % date_first.dt.year.values, '%s-03-31' % date_last.dt.year.values)) +model_nh_enso_titles = ['November', 'December', 'January', 'February', 'March'] +model_nh_enso_plot_months = [11, 12, 1, 2, 3] +model_nh_enso_axes = [0, 90, 1000, 1] model_nh_enso_psl_axes = [-180, 180, 20, 90] -model_sh_enso_uzm = uas.ua.sel(time=slice('%s-09-01' % 
date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) +model_sh_enso_uzm = uas.ua.sel( + time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) model_sh_enso_vtzm = vt.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -model_sh_enso_psl = ps.psl.sel(time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) -model_sh_enso_titles = ['September','October','November','December','January'] -model_sh_enso_plot_months = [9,10,11,12,1] -model_sh_enso_axes = [-90,0,1000,1] +model_sh_enso_psl = ps.psl.sel( + time=slice('%s-09-01' % date_first.dt.year.values, '%s-01-31' % date_last.dt.year.values)) +model_sh_enso_titles = ['September', 'October', 'November', 'December', 'January'] +model_sh_enso_plot_months = [9, 10, 11, 12, 1] +model_sh_enso_axes = [-90, 0, 1000, 1] model_sh_enso_psl_axes = [-180, 180, -90, -20] model_uzm_dict = {} @@ -984,8 +1000,12 @@ def qbo_index(seasonal): model_vtzm_dict['SH'] = model_sh_enso_vtzm, model_sh_enso_titles, model_sh_enso_plot_months, model_sh_enso_axes model_psl_dict = {} -model_psl_dict['NH'] = model_nh_enso_psl, model_nh_enso_titles, model_nh_enso_plot_months, ccrs.NorthPolarStereo(), model_nh_enso_psl_axes -model_psl_dict['SH'] = model_sh_enso_psl, model_sh_enso_titles, model_sh_enso_plot_months, ccrs.SouthPolarStereo(), model_sh_enso_psl_axes +model_psl_dict[ + 'NH'] = (model_nh_enso_psl, model_nh_enso_titles, model_nh_enso_plot_months, ccrs.NorthPolarStereo(), + model_nh_enso_psl_axes) +model_psl_dict[ + 'SH'] = (model_sh_enso_psl, model_sh_enso_titles, model_sh_enso_plot_months, ccrs.SouthPolarStereo(), + model_sh_enso_psl_axes) ######################################################################### # Define QBO plotting parameters to be passed to the plotting functions # @@ -994,166 +1014,178 @@ def qbo_index(seasonal): model_nh_qbo_uzm = uas.ua.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) model_nh_qbo_vtzm = vt.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) model_nh_qbo_psl = ps.psl.sel(time=slice('%s-10-01' % date_first.dt.year.values, '%s-02-28' % date_last.dt.year.values)) -model_nh_qbo_titles = ['October','November','December','January','February'] -model_nh_qbo_plot_months = [10,11,12,1,2] -model_nh_qbo_axes = [0,90,1000,1] +model_nh_qbo_titles = ['October', 'November', 'December', 'January', 'February'] +model_nh_qbo_plot_months = [10, 11, 12, 1, 2] +model_nh_qbo_axes = [0, 90, 1000, 1] model_nh_qbo_psl_axes = [-180, 180, 20, 90] model_sh_qbo_uzm = uas.ua.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) model_sh_qbo_vtzm = vt.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) model_sh_qbo_psl = ps.psl.sel(time=slice('%s-07-01' % date_first.dt.year.values, '%s-11-30' % date_last.dt.year.values)) -model_sh_qbo_titles = ['July','August','September','October','November'] -model_sh_qbo_plot_months = [7,8,9,10,11] -model_sh_qbo_axes = [-90,0,1000,1] +model_sh_qbo_titles = ['July', 'August', 'September', 'October', 'November'] +model_sh_qbo_plot_months = [7, 8, 9, 10, 11] +model_sh_qbo_axes = [-90, 0, 1000, 1] model_sh_qbo_psl_axes = [-180, 180, -90, -20] model_uzm_qbo_dict = {} model_uzm_qbo_dict['NH'] = model_nh_qbo_uzm, model_nh_qbo_titles, model_nh_qbo_plot_months, model_nh_qbo_axes model_uzm_qbo_dict['SH'] = model_sh_qbo_uzm, 
model_sh_qbo_titles, model_sh_qbo_plot_months, model_sh_qbo_axes -print (model_uzm_qbo_dict) +print(model_uzm_qbo_dict) model_vtzm_qbo_dict = {} model_vtzm_qbo_dict['NH'] = model_nh_qbo_vtzm, model_nh_qbo_titles, model_nh_qbo_plot_months, model_nh_qbo_axes model_vtzm_qbo_dict['SH'] = model_sh_qbo_vtzm, model_sh_qbo_titles, model_sh_qbo_plot_months, model_sh_qbo_axes -print (model_vtzm_qbo_dict) +print(model_vtzm_qbo_dict) model_psl_qbo_dict = {} -model_psl_qbo_dict['NH'] = model_nh_qbo_psl, model_nh_qbo_titles, model_nh_qbo_plot_months, ccrs.NorthPolarStereo(), model_nh_qbo_psl_axes -model_psl_qbo_dict['SH'] = model_sh_qbo_psl, model_sh_qbo_titles, model_sh_qbo_plot_months, ccrs.SouthPolarStereo(), model_sh_qbo_psl_axes -print (model_psl_qbo_dict) - +model_psl_qbo_dict[ + 'NH'] = (model_nh_qbo_psl, model_nh_qbo_titles, model_nh_qbo_plot_months, ccrs.NorthPolarStereo(), + model_nh_qbo_psl_axes) +model_psl_qbo_dict[ + 'SH'] = (model_sh_qbo_psl, model_sh_qbo_titles, model_sh_qbo_plot_months, ccrs.SouthPolarStereo(), + model_sh_qbo_psl_axes) +print(model_psl_qbo_dict) -hemispheres = ['NH','SH'] +hemispheres = ['NH', 'SH'] for hemi in hemispheres: - - ############################################ - print ('*** Calling the model ENSO indices') - ############################################ - model_nina, model_nino = model_enso_dict[hemi] - - print (model_nina,'model_nina') - print (model_nino, 'model_nino') - - ################################################################ - print ('*** Running the model ENSO zonal mean zonal wind calcs') - ################################################################ - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-uzm-%s.png' % hemi - out_fig, out_ax = enso_uzm(model_uzm_dict[hemi][0],model_nina,model_nino,model_uzm_dict[hemi][1],model_uzm_dict[hemi][2],model_uzm_dict[hemi][3]) - out_fig.savefig(out_plot,dpi=700) - - ######################################################### - print ('*** Running the model ENSO eddy heat flux calcs') - ######################################################### - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-vt-%s.png' % hemi - out_fig, out_ax = enso_vt(model_vtzm_dict[hemi][0],model_nina,model_nino,model_vtzm_dict[hemi][1],model_vtzm_dict[hemi][2],model_vtzm_dict[hemi][3]) - out_fig.savefig(out_plot,dpi=700) - - ####################################################### - print ('*** Running the model ENSO sea level pressure') - ####################################################### - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-psl-%s.png' % hemi - out_fig, out_ax = enso_slp(model_psl_dict[hemi][0],model_nina,model_nino,model_psl_dict[hemi][1],model_psl_dict[hemi][2],model_psl_dict[hemi][3],model_psl_dict[hemi][4]) - out_fig.savefig(out_plot,dpi=700) + ############################################ + print('*** Calling the model ENSO indices') + ############################################ + model_nina, model_nino = model_enso_dict[hemi] + + print(model_nina, 'model_nina') + print(model_nino, 'model_nino') + + ################################################################ + print('*** Running the model ENSO zonal mean zonal wind calcs') + ################################################################ + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-uzm-%s.png' % hemi + out_fig, out_ax = enso_uzm(model_uzm_dict[hemi][0], model_nina, model_nino, model_uzm_dict[hemi][1], + model_uzm_dict[hemi][2], model_uzm_dict[hemi][3]) + out_fig.savefig(out_plot, dpi=700) + + 
######################################################### + print('*** Running the model ENSO eddy heat flux calcs') + ######################################################### + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-vt-%s.png' % hemi + out_fig, out_ax = enso_vt(model_vtzm_dict[hemi][0], model_nina, model_nino, model_vtzm_dict[hemi][1], + model_vtzm_dict[hemi][2], model_vtzm_dict[hemi][3]) + out_fig.savefig(out_plot, dpi=700) + + ####################################################### + print('*** Running the model ENSO sea level pressure') + ####################################################### + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-enso34-psl-%s.png' % hemi + out_fig, out_ax = enso_slp(model_psl_dict[hemi][0], model_nina, model_nino, model_psl_dict[hemi][1], + model_psl_dict[hemi][2], model_psl_dict[hemi][3], model_psl_dict[hemi][4]) + out_fig.savefig(out_plot, dpi=700) ########################################## print('*** Running the model QBO metrics') ########################################## -metricsout, switch = qbo_metrics(uas,QBOisobar) +metricsout, switch = qbo_metrics(uas, QBOisobar) filepath = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-metrics.txt' with open(filepath, 'w') as file_handler: - file_handler.write(f"{'QBO metrics: periodicity and spatial characteristics'}\n") - file_handler.write(f"{' '}\n") - for item in metricsout: - file_handler.write(f"{item}\n") + file_handler.write(f"{'QBO metrics: periodicity and spatial characteristics'}\n") + file_handler.write(f"{' '}\n") + for item in metricsout: + file_handler.write(f"{item}\n") if switch == 1: - ################################################################################### - print ('*** A model QBO was detected so POD is now running the model QBO indexing') - ################################################################################### - - print (QBOisobar, "QBOisobar") - - # Subset atmospheric data for user defined isobar # - subset = uas.sel(lev=QBOisobar) - - # Select 5S-5N winds # - tropical_subset = subset.interp(lat=[-5,-2.5,0,2.5,5]) - - # Latitudinally weight and average # - qbo = tropical_subset.mean('lat') - weights = np.cos(np.deg2rad(tropical_subset.lat.values)) - interim = np.multiply(tropical_subset.ua.values,weights[np.newaxis,:]) - interim = np.nansum(interim,axis=1) - interim = np.true_divide(interim,np.sum(weights)) - qbo.ua.values[:] = interim[:] - - def qbo_index(seasonal): - - # Create 2-month seasonally averaged standardized QBO anomalies. 
Weight each month by number of days comprising month # - day_in_month_weights = seasonal.time.dt.days_in_month.values[:2]/np.sum(seasonal.time.dt.days_in_month.values[:2]) - qboindex = np.reshape(seasonal.ua.values,(int(len(seasonal.ua.values)/2),2)) - qboindex = np.nanmean(np.multiply(qboindex,day_in_month_weights[np.newaxis,:]),axis=1) - anom = np.subtract(qboindex,np.nanmean(qboindex)) - anom = np.true_divide(anom,np.nanstd(qboindex)) - - # Get the unique years from "seasonal" and then remove the last one, which is not needed - years = [v for v in set(np.sort(seasonal.time.dt.year.values))] - eqbo_years = [years[i] for i,v in enumerate(anom) if v <= -1] - wqbo_years = [years[i] for i,v in enumerate(anom) if v >= 1] - - return eqbo_years, wqbo_years - - # Subsample QBO data for NH # - seasonal = qbo.sel(time=qbo.time.dt.month.isin([10,11])) - model_nh_eqbo, model_nh_wqbo = qbo_index(seasonal) - seasonal.close() - - # Subsample QBO data for SH # - seasonal = qbo.sel(time=qbo.time.dt.month.isin([7,8])) - model_sh_eqbo, model_sh_wqbo = qbo_index(seasonal) - seasonal.close() - - for hemi in hemispheres: - - # Store the Nina/Nino years in a dictionary to call later # - model_qbo_dict = {} - model_qbo_dict['NH'] = model_nh_eqbo, model_nh_wqbo - model_qbo_dict['SH'] = model_sh_eqbo, model_sh_wqbo - - ############################################ - print ('*** Running the model QBO indexing') - ############################################ - model_eqbo, model_wqbo = model_qbo_dict[hemi] - - print (model_eqbo, 'model_eqbo') - print (model_wqbo, 'model_wqbo') - - ################################################################## - print ('*** Running the model QBO zonal mean zonal wind plotting') - ################################################################## - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-uzm-%s.png' % hemi - out_fig, out_ax = qbo_uzm(model_uzm_qbo_dict[hemi][0],model_eqbo,model_wqbo,QBOisobar,model_uzm_qbo_dict[hemi][1],model_uzm_qbo_dict[hemi][2],model_uzm_qbo_dict[hemi][3]) - out_fig.savefig(out_plot,dpi=700) - - ###################################################################### - print ('*** Running the model QBO zonal mean eddy heat flux plotting') - ###################################################################### - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-vt-%s.png' % hemi - out_fig, out_ax = qbo_vt(model_vtzm_qbo_dict[hemi][0],model_eqbo,model_wqbo,QBOisobar,model_vtzm_qbo_dict[hemi][1],model_vtzm_qbo_dict[hemi][2],model_vtzm_qbo_dict[hemi][3]) - out_fig.savefig(out_plot,dpi=700) - - print ('*** Running the model QBO sea level pressure plotting') - out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-psl-%s.png' % hemi - out_fig, out_ax = qbo_slp(model_psl_qbo_dict[hemi][0],model_eqbo,model_wqbo,QBOisobar,model_psl_qbo_dict[hemi][1],model_psl_qbo_dict[hemi][2],model_psl_qbo_dict[hemi][3],model_psl_qbo_dict[hemi][4]) - out_fig.savefig(out_plot,dpi=700) - + ################################################################################### + print('*** A model QBO was detected so POD is now running the model QBO indexing') + ################################################################################### + + print(QBOisobar, "QBOisobar") + + # Subset atmospheric data for user defined isobar # + subset = uas.sel(lev=QBOisobar) + + # Select 5S-5N winds # + tropical_subset = subset.interp(lat=[-5, -2.5, 0, 2.5, 5]) + + # Latitudinally weight and average # + qbo = 
tropical_subset.mean('lat') + weights = np.cos(np.deg2rad(tropical_subset.lat.values)) + interim = np.multiply(tropical_subset.ua.values, weights[np.newaxis, :]) + interim = np.nansum(interim, axis=1) + interim = np.true_divide(interim, np.sum(weights)) + qbo.ua.values[:] = interim[:] + + + def qbo_index(seasonal): + + # Create 2-month seasonally averaged standardized QBO anomalies. Weight each month by number of days comprising + # month # + day_in_month_weights = seasonal.time.dt.days_in_month.values[:2] / np.sum( + seasonal.time.dt.days_in_month.values[:2]) + qboindex = np.reshape(seasonal.ua.values, (int(len(seasonal.ua.values) / 2), 2)) + qboindex = np.nanmean(np.multiply(qboindex, day_in_month_weights[np.newaxis, :]), axis=1) + anom = np.subtract(qboindex, np.nanmean(qboindex)) + anom = np.true_divide(anom, np.nanstd(qboindex)) + + # Get the unique years from "seasonal" and then remove the last one, which is not needed + years = [v for v in set(np.sort(seasonal.time.dt.year.values))] + eqbo_years = [years[i] for i, v in enumerate(anom) if v <= -1] + wqbo_years = [years[i] for i, v in enumerate(anom) if v >= 1] + + return eqbo_years, wqbo_years + + + # Subsample QBO data for NH # + seasonal = qbo.sel(time=qbo.time.dt.month.isin([10, 11])) + model_nh_eqbo, model_nh_wqbo = qbo_index(seasonal) + seasonal.close() + + # Subsample QBO data for SH # + seasonal = qbo.sel(time=qbo.time.dt.month.isin([7, 8])) + model_sh_eqbo, model_sh_wqbo = qbo_index(seasonal) + seasonal.close() + + for hemi in hemispheres: + # Store the Nina/Nino years in a dictionary to call later # + model_qbo_dict = {} + model_qbo_dict['NH'] = model_nh_eqbo, model_nh_wqbo + model_qbo_dict['SH'] = model_sh_eqbo, model_sh_wqbo + + ############################################ + print('*** Running the model QBO indexing') + ############################################ + model_eqbo, model_wqbo = model_qbo_dict[hemi] + + print(model_eqbo, 'model_eqbo') + print(model_wqbo, 'model_wqbo') + + ################################################################## + print('*** Running the model QBO zonal mean zonal wind plotting') + ################################################################## + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-uzm-%s.png' % hemi + out_fig, out_ax = qbo_uzm(model_uzm_qbo_dict[hemi][0], model_eqbo, model_wqbo, QBOisobar, + model_uzm_qbo_dict[hemi][1], model_uzm_qbo_dict[hemi][2], model_uzm_qbo_dict[hemi][3]) + out_fig.savefig(out_plot, dpi=700) + + ###################################################################### + print('*** Running the model QBO zonal mean eddy heat flux plotting') + ###################################################################### + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-vt-%s.png' % hemi + out_fig, out_ax = qbo_vt(model_vtzm_qbo_dict[hemi][0], model_eqbo, model_wqbo, QBOisobar, + model_vtzm_qbo_dict[hemi][1], model_vtzm_qbo_dict[hemi][2], + model_vtzm_qbo_dict[hemi][3]) + out_fig.savefig(out_plot, dpi=700) + + print('*** Running the model QBO sea level pressure plotting') + out_plot = f'{plot_dir}/{CASENAME}-{FIRSTYR}-{LASTYR}-qbo{QBOisobar}hPa-psl-%s.png' % hemi + out_fig, out_ax = qbo_slp(model_psl_qbo_dict[hemi][0], model_eqbo, model_wqbo, QBOisobar, + model_psl_qbo_dict[hemi][1], model_psl_qbo_dict[hemi][2], model_psl_qbo_dict[hemi][3], + model_psl_qbo_dict[hemi][4]) + out_fig.savefig(out_plot, dpi=700) + if switch == 0: + print("No QBO detected in the model data. 
As a result, QBO Ubar, v'T', and SLP plots were not made.")

- print ("No QBO detected in the model data. As a result, QBO Ubar, v'T', ans SLP plots were not made.")
-
###################################
# Prepare the output dictionaries #
###################################

@@ -1166,52 +1198,51 @@ def qbo_index(seasonal):
# Saving some of the data #
###########################

-vt_data['NH'] = vt.sel(lat = np.logical_and(vt.lat >= 0, vt.lat <= 90))
-vt_data['SH'] = vt.sel(lat = np.logical_and(vt.lat >= -90, vt.lat <= 0))
-vt_out = xr.concat([vt_data['SH'], vt_data['NH']], dim='hemi')
+vt_data['NH'] = vt.sel(lat=np.logical_and(vt.lat >= 0, vt.lat <= 90))
+vt_data['SH'] = vt.sel(lat=np.logical_and(vt.lat >= -90, vt.lat <= 0))
+vt_out = xr.concat([vt_data['SH'], vt_data['NH']], dim='hemi')
vt_out.name = 'vt_out'
vt_out.attrs['units'] = 'Km s**-1'
vt_out.attrs['long_name'] = 'Pole to equator zonal-mean eddy heat flux'

-uzm_data['NH'] = uas.ua.sel(lat = np.logical_and(uas.lat >= 0, uas.lat <= 90))
-uzm_data['SH'] = uas.ua.sel(lat = np.logical_and(uas.lat >= -90, uas.lat <= 0))
-uzm_out = xr.concat([uzm_data['SH'], uzm_data['NH']], dim='hemi')
+uzm_data['NH'] = uas.ua.sel(lat=np.logical_and(uas.lat >= 0, uas.lat <= 90))
+uzm_data['SH'] = uas.ua.sel(lat=np.logical_and(uas.lat >= -90, uas.lat <= 0))
+uzm_out = xr.concat([uzm_data['SH'], uzm_data['NH']], dim='hemi')
uzm_out.name = 'uzm_out'
uzm_out.attrs['units'] = 'm s**-1'
uzm_out.attrs['long_name'] = 'Pole to equator zonal-mean zonal wind'

-slp_data['NH'] = ps.psl.sel(lat = np.logical_and(ps.lat >= 20, ps.lat <= 90))
-slp_data['SH'] = ps.psl.sel(lat = np.logical_and(ps.lat >= -90, ps.lat <= -20))
-slp_out = xr.concat([slp_data['SH'], slp_data['NH']], dim='hemi')
+slp_data['NH'] = ps.psl.sel(lat=np.logical_and(ps.lat >= 20, ps.lat <= 90))
+slp_data['SH'] = ps.psl.sel(lat=np.logical_and(ps.lat >= -90, ps.lat <= -20))
+slp_out = xr.concat([slp_data['SH'], slp_data['NH']], dim='hemi')
slp_out.name = 'slp_out'
slp_out.attrs['units'] = 'hPa'
slp_out.attrs['long_name'] = 'Pole to 20N/S sea level pressure'

-qbo_out = uas.ua.interp(lat=[-5,-2.5,0,2.5,5]).sel(lev=QBOisobar)
+qbo_out = uas.ua.interp(lat=[-5, -2.5, 0, 2.5, 5]).sel(lev=QBOisobar)
qbo_out.name = 'qbo_out'
qbo_out.attrs['units'] = 'm s**-1'
qbo_out.attrs['long_name'] = f'5S to 5N {QBOisobar} hPa zonal-mean zonal wind'

-print (qbo_out, 'qbo_out')
+print(qbo_out, 'qbo_out')

-out_ds = xr.merge([vt_out,uzm_out,slp_out,qbo_out])
-print ('OUT_DS')
-print (out_ds)
-print (' ')
-print (' ')
+out_ds = xr.merge([vt_out, uzm_out, slp_out, qbo_out])
+print('OUT_DS')
+print(out_ds)
+print(' ')
+print(' ')

print('*** Preparing to save derived data')
data_dir = f'{WK_DIR}/model/netCDF'
-outfile = data_dir+f'/{CASENAME}_qbo-enso_diagnostics.nc'
+outfile = data_dir + f'/{CASENAME}_qbo-enso_diagnostics.nc'

-encoding = {'vt_out': {'dtype':'float32'},
- 'uzm_out': {'dtype':'float32'},
- 'slp_out': {'dtype':'float32'},
- 'qbo_out': {'dtype':'float32'}}
+encoding = {'vt_out': {'dtype': 'float32'},
+ 'uzm_out': {'dtype': 'float32'},
+ 'slp_out': {'dtype': 'float32'},
+ 'qbo_out': {'dtype': 'float32'}}

print(f'*** Saving qbo-enso diagnostic data to {outfile}')
out_ds.to_netcdf(outfile, encoding=encoding)

-
print('\n=====================================')
print('END stc_qbo_enso.py ')
-print('=====================================\n')
\ No newline at end of file
+print('=====================================\n')
diff --git a/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeenso.py
b/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeenso.py index 35f797de7..3fae0f50e 100644 --- a/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeenso.py +++ b/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeenso.py @@ -7,7 +7,6 @@ enso_vt: plots the zonally averaged eddy heat flux responses to the ENSO as a function of month and ENSO phase ''' - import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt @@ -16,752 +15,767 @@ from cartopy.util import add_cyclic_point from scipy import stats + ################################################################################################## ################################################################################################## ################################################################################################## -def enso_uzm(uzm,negative_indices,positive_indices,titles,plot_months,axes): - - r""" Compute the zonal mean zonal wind response to ENSO. Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during La Nina years, - El Nino years, and then their difference is shown. Stippling is used to denote statistical - significance on the La Nina minus El Nino composites. - - Parameters - ---------- - uzm : xarray.DataArray - The zonal mean zonal wind. - - negative_indices : list - A list of La Nina years. - - positive_indices : list - A list of El Nino years. - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - axes : A list of numbers used to set the pressure and latitude limits of the subplots. - [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of La Nina uzm anomalies (top row), El Nino uzm anomalies (middle), - and their difference (bottom row) with stippling highlighting differences between El Nino - and La Nina winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field uzm is assumed to have dimensions named "lat" and "lev". 
- E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','level':'lev'}) - """ - - nina_out = [] - nino_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = uzm.sel(time=uzm.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(nina_tmp.values,nino_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(nina_tmp.mean('time').values,nino_tmp.mean('time').values)) - nina_out.append(nina_tmp.mean('time').values) - nino_out.append(nino_tmp.mean('time').values) - - clim.close() - anom.close() - nina_tmp.close() - nino_tmp.close() - uzm.close() - - nina_out = np.array(nina_out) - nino_out = np.array(nino_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - vmin = -10 - vmax = 10 - vlevs = np.linspace(vmin,vmax,num=21) - vlevs = [v for v in vlevs if v != 0] - ticks = [vmin,vmin/2,0,vmax/2,vmax] - - cmin = -200 - cmax = 200 - clevs = np.linspace(cmin,cmax,num=41) - clevs = [v for v in clevs if v != 0] - - plt.suptitle('ENSO zonal-mean zonal wind (m/s)',fontsize=12,fontweight='normal') - - # Add colormap # - from palettable.colorbrewer.diverging import RdBu_11 - cmap1=RdBu_11.mpl_colormap.reversed() - - x, y = np.meshgrid(uzm.lat.values,uzm.lev.values) - uzm.close() - - cols = [0,1,2,3,4] - - for i in cols: - - print (i) - - # Nina # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i])) - plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal') - - cs = plt.contourf(x,y,nina_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nina (%s seasons)' % int(len(negative_indices)),fontsize=10) - - # Nino # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i])) - - cs = plt.contourf(x,y,nino_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = 
plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)),fontsize=10) - - # Diff: Nina minus Nino # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i])) - - cs = plt.contourf(x,y,diff_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - plt.xlabel('Latitude',fontsize=8,fontweight='normal') - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - plt.contourf(x,y,sigs_out[i],colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0) - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nina - Nino',fontsize=10) - - # Add colorbar # - - cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) - cbar = fig.colorbar(cs, cax=cb_ax, ticks=ticks,orientation='horizontal') - cbar.ax.tick_params(labelsize=8, width=1) - cbar.ax.set_xticklabels(ticks,weight='normal') - - plt.subplots_adjust(top=0.86,bottom=0.16,hspace=0.5,wspace=0.55,left=0.08,right=0.95) - return fig, ax - +def enso_uzm(uzm, negative_indices, positive_indices, titles, plot_months, axes): + r""" Compute the zonal mean zonal wind response to ENSO. Anomalies are defined as + deviations from the seasonal cycle and composites of anomalies are made during La Nina years, + El Nino years, and then their difference is shown. Stippling is used to denote statistical + significance on the La Nina minus El Nino composites. + + Parameters + ---------- + uzm : xarray.DataArray + The zonal mean zonal wind. + + negative_indices : list + A list of La Nina years. + + positive_indices : list + A list of El Nino years. + + titles : list of strings + A list of month names that will be used as the titles for each column of subplots + + plot_months : list + A list of numbers corresponding to each month (e.g., 10 = october) + + axes : A list of numbers used to set the pressure and latitude limits of the subplots. + [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. 
hemisphere
+
+ Returns
+ -------
+ 3 row by 5 column plot of La Nina uzm anomalies (top row), El Nino uzm anomalies (middle),
+ and their difference (bottom row) with stippling highlighting differences between El Nino
+ and La Nina winds statistically significant at the 95% level using a two-sided t-test.
+
+ Notes
+ -----
+ The input field uzm is assumed to have dimensions named "lat" and "lev".
+ E.g., if your data has dimensions "latitude" and/or "level",
+ use the rename method:
+ ds.rename({'latitude':'lat','level':'lev'})
+ """
+
+ nina_out = []
+ nino_out = []
+ diff_out = []
+ sigs_out = []
+ clim_out = []
+
+ for mon in plot_months:
+
+ clim = uzm.sel(time=uzm.time.dt.month.isin([mon]))
+ clim_out.append(clim.mean('time').values)
+
+ anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time")
+
+ if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12:
+ tmp_negative_indices = np.add(negative_indices, 0)
+ tmp_positive_indices = np.add(positive_indices, 0)
+ if mon == 1 or mon == 2 or mon == 3:
+ tmp_negative_indices = np.add(negative_indices, 1)
+ tmp_positive_indices = np.add(positive_indices, 1)
+
+ nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices]))
+ nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices]))
+
+ t, p = stats.ttest_ind(nina_tmp.values, nino_tmp.values, axis=0, nan_policy='omit')
+
+ sigs_out.append(np.subtract(1, p))
+ diff_out.append(np.subtract(nina_tmp.mean('time').values, nino_tmp.mean('time').values))
+ nina_out.append(nina_tmp.mean('time').values)
+ nino_out.append(nino_tmp.mean('time').values)
+
+ clim.close()
+ anom.close()
+ nina_tmp.close()
+ nino_tmp.close()
+ uzm.close()
+
+ nina_out = np.array(nina_out)
+ nino_out = np.array(nino_out)
+ diff_out = np.array(diff_out)
+ sigs_out = np.array(sigs_out)
+ clim_out = np.array(clim_out)
+
+ # ############ Begin the plotting ############
+
+ fig, ax = plt.subplots()
+
+ mpl.rcParams['font.sans-serif'].insert(0, 'Arial')
+
+ vmin = -10
+ vmax = 10
+ vlevs = np.linspace(vmin, vmax, num=21)
+ vlevs = [v for v in vlevs if v != 0]
+ ticks = [vmin, vmin / 2, 0, vmax / 2, vmax]
+
+ cmin = -200
+ cmax = 200
+ clevs = np.linspace(cmin, cmax, num=41)
+ clevs = [v for v in clevs if v != 0]
+
+ plt.suptitle('ENSO zonal-mean zonal wind (m/s)', fontsize=12, fontweight='normal')
+
+ # Add colormap #
+ from palettable.colorbrewer.diverging import RdBu_11
+ cmap1 = RdBu_11.mpl_colormap.reversed()
+
+ x, y = np.meshgrid(uzm.lat.values, uzm.lev.values)
+ uzm.close()
+
+ cols = [0, 1, 2, 3, 4]
+
+ for i in cols:
+
+ print(i)
+
+ # Nina #
+
+ ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i]))
+ plt.title('%s' % titles[i], fontsize=10, y=0.93, fontweight='normal')
+
+ cs = plt.contourf(x, y, nina_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax, zorder=1)
+
+ mpl.rcParams["lines.linewidth"] = 0.2
+ mpl.rcParams["lines.dashed_pattern"] = 10, 3
+ black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, zorder=3)
+ plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1)
+
+ plt.semilogy()
+ yticks = [1, 10, 100, 1000]
+ plt.yticks(yticks, yticks, fontsize=6, fontweight='normal')
+ xticks = [-90, -60, -30, 0, 30, 60, 90]
+ plt.xticks(xticks, xticks, fontsize=6, fontweight='normal')
+ plt.gca().invert_yaxis()
+ plt.axis(axes)
+ if i == 0:
+ plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal')
+
+ if i == 4:
+ ax2 = ax1.twinx()
+ yticks = [0,
0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina (%s seasons)' % int(len(negative_indices)), fontsize=10) + + # Nino # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i])) + + cs = plt.contourf(x, y, nino_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax, zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)), fontsize=10) + + # Diff: Nina minus Nino # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(2, cols[i])) + + cs = plt.contourf(x, y, diff_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax, zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + plt.xlabel('Latitude', fontsize=8, fontweight='normal') + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + plt.contourf(x, y, sigs_out[i], colors='black', vmin=0.95, vmax=1, levels=sig_levs, + hatches=['......', '......'], alpha=0.0) + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina - Nino', fontsize=10) + + # Add colorbar # + + cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) + cbar = fig.colorbar(cs, cax=cb_ax, ticks=ticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=8, width=1) + cbar.ax.set_xticklabels(ticks, weight='normal') + + plt.subplots_adjust(top=0.86, bottom=0.16, hspace=0.5, wspace=0.55, left=0.08, right=0.95) + return fig, ax + + ################################################################################################## ################################################################################################## ################################################################################################## - -def enso_vt(vt,negative_indices,positive_indices,titles,plot_months,axes): - - r""" Compute the zonal mean eddy heat flux response to ENSO. 
Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during La Nina years, - El Nino years, and then their difference is shown. Stippling is used to denote statistical - significance on the La Nina minus El Nino composites. - - Parameters - ---------- - vt : xarray.DataArray - The zonal mean eddy heat flux. This quantity is calculated using the - compute_total_eddy_heat_flux function given in the driver script stc_qbo_enso.py - - negative_indices : list - A list of La Nina years. - - positive_indices : list - A list of El Nino years. - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - axes : A list of numbers used to set the pressure and latitude limits of the subplots. - [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of La Nina vt anomalies (top row), El Nino vt anomalies (middle), - and their difference (bottom row) with stippling highlighting differences between El Nino - and La Nina winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field vt is assumed to have dimensions named "lat" and "lev". - E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','level':'lev'}) - """ - - nina_out = [] - nino_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = vt.sel(time=vt.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(nina_tmp.values,nino_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(nina_tmp.mean('time').values,nino_tmp.mean('time').values)) - nina_out.append(nina_tmp.mean('time').values) - nino_out.append(nino_tmp.mean('time').values) - - clim.close() - anom.close() - nina_tmp.close() - nino_tmp.close() - vt.close() - - nina_out = np.array(nina_out) - nino_out = np.array(nino_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - blevs = [] - - blevs.append(-2) - blevs.append(-6) - blevs.append(-10) - blevs.append(-25) - blevs.append(-50) - blevs.append(-100) - #blevs.append(-200) - - blevs.append(2) - blevs.append(6) - blevs.append(10) - blevs.append(25) - blevs.append(50) - blevs.append(100) - #blevs.append(200) - - - blevs = np.sort(blevs) - print (blevs) - - cmin = -200 - cmax = 200 - clevs = np.linspace(cmin,cmax,num=41) - clevs = [v for v in clevs if v != 0] - - plt.suptitle('ENSO zonal-mean eddy heat flux (Km/s)',fontsize=12,fontweight='normal') - - # Add colormap # - from palettable.colorbrewer.diverging import 
RdBu_11 - cmap1=RdBu_11.mpl_colormap.reversed() - - x, y = np.meshgrid(vt.lat.values,vt.lev.values) - - cols = [0,1,2,3,4] - - for i in cols: - - print (i) - - # Nina # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i])) - plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal') - - cs = plt.contourf(x,y,nina_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nina (%s seasons)' % int(len(negative_indices)),fontsize=10) - - # Nino # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i])) - - cs = plt.contourf(x,y,nino_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)),fontsize=10) - - # Diff: Nina minus Nino # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i])) - - cs = plt.contourf(x,y,diff_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - plt.xlabel('Latitude',fontsize=8,fontweight='normal') - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - plt.contourf(x,y,sigs_out[i],colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0) - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] 
- ax2.set_yticks(yticks)
- ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal')
- ax2.set_ylabel('Nina - Nino',fontsize=10)
-
- # Add colorbar #
-
- oticks = [-100,-50,-25,-10,-6,-2,2,6,10,25,50,100]
- cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015])
- cbar = fig.colorbar(cs, cax=cb_ax, ticks=oticks,orientation='horizontal')
- cbar.ax.tick_params(labelsize=6, width=1)
- cbar.ax.set_xticklabels(oticks,weight='normal')
-
- plt.subplots_adjust(top=0.86,bottom=0.16,hspace=0.5,wspace=0.55,left=0.08,right=0.95)
- return fig, ax
-
+
+def enso_vt(vt, negative_indices, positive_indices, titles, plot_months, axes):
+ r""" Compute the zonal mean eddy heat flux response to ENSO. Anomalies are defined as
+ deviations from the seasonal cycle and composites of anomalies are made during La Nina years,
+ El Nino years, and then their difference is shown. Stippling is used to denote statistical
+ significance on the La Nina minus El Nino composites.
+
+ Parameters
+ ----------
+ vt : xarray.DataArray
+ The zonal mean eddy heat flux. This quantity is calculated using the
+ compute_total_eddy_heat_flux function given in the driver script stc_qbo_enso.py
+
+ negative_indices : list
+ A list of La Nina years.
+
+ positive_indices : list
+ A list of El Nino years.
+
+ titles : list of strings
+ A list of month names that will be used as the titles for each column of subplots
+
+ plot_months : list
+ A list of numbers corresponding to each month (e.g., 10 = october)
+
+ axes : A list of numbers used to set the pressure and latitude limits of the subplots.
+ [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere
+
+ Returns
+ -------
+ 3 row by 5 column plot of La Nina vt anomalies (top row), El Nino vt anomalies (middle),
+ and their difference (bottom row) with stippling highlighting differences between El Nino
+ and La Nina heat fluxes statistically significant at the 95% level using a two-sided t-test.
+
+ Notes
+ -----
+ The input field vt is assumed to have dimensions named "lat" and "lev".
+ E.g., if your data has dimensions "latitude" and/or "level", + use the rename method: + ds.rename({'latitude':'lat','level':'lev'}) + """ + + nina_out = [] + nino_out = [] + diff_out = [] + sigs_out = [] + clim_out = [] + + for mon in plot_months: + + clim = vt.sel(time=vt.time.dt.month.isin([mon])) + clim_out.append(clim.mean('time').values) + + anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") + + if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: + tmp_negative_indices = np.add(negative_indices, 0) + tmp_positive_indices = np.add(positive_indices, 0) + if mon == 1 or mon == 2 or mon == 3: + tmp_negative_indices = np.add(negative_indices, 1) + tmp_positive_indices = np.add(positive_indices, 1) + + nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) + nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) + + t, p = stats.ttest_ind(nina_tmp.values, nino_tmp.values, axis=0, nan_policy='omit') + + sigs_out.append(np.subtract(1, p)) + diff_out.append(np.subtract(nina_tmp.mean('time').values, nino_tmp.mean('time').values)) + nina_out.append(nina_tmp.mean('time').values) + nino_out.append(nino_tmp.mean('time').values) + + clim.close() + anom.close() + nina_tmp.close() + nino_tmp.close() + vt.close() + + nina_out = np.array(nina_out) + nino_out = np.array(nino_out) + diff_out = np.array(diff_out) + sigs_out = np.array(sigs_out) + clim_out = np.array(clim_out) + + ############# Begin the plotting ############ + + fig, ax = plt.subplots() + + mpl.rcParams['font.sans-serif'].insert(0, 'Arial') + + blevs = [] + + blevs.append(-2) + blevs.append(-6) + blevs.append(-10) + blevs.append(-25) + blevs.append(-50) + blevs.append(-100) + # blevs.append(-200) + + blevs.append(2) + blevs.append(6) + blevs.append(10) + blevs.append(25) + blevs.append(50) + blevs.append(100) + # blevs.append(200) + + blevs = np.sort(blevs) + print(blevs) + + cmin = -200 + cmax = 200 + clevs = np.linspace(cmin, cmax, num=41) + clevs = [v for v in clevs if v != 0] + + plt.suptitle('ENSO zonal-mean eddy heat flux (Km/s)', fontsize=12, fontweight='normal') + + # Add colormap # + from palettable.colorbrewer.diverging import RdBu_11 + cmap1 = RdBu_11.mpl_colormap.reversed() + + x, y = np.meshgrid(vt.lat.values, vt.lev.values) + + cols = [0, 1, 2, 3, 4] + + for i in cols: + + print(i) + + # Nina # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i])) + plt.title('%s' % titles[i], fontsize=10, y=0.93, fontweight='normal') + + cs = plt.contourf(x, y, nina_out[i], blevs, + norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1, + extend="both", zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina (%s seasons)' % 
int(len(negative_indices)), fontsize=10) + + # Nino # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i])) + + cs = plt.contourf(x, y, nino_out[i], blevs, + norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1, + extend="both", zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)), fontsize=10) + + # Diff: Nina minus Nino # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(2, cols[i])) + + cs = plt.contourf(x, y, diff_out[i], blevs, + norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1, + extend="both", zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + plt.xlabel('Latitude', fontsize=8, fontweight='normal') + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + plt.contourf(x, y, sigs_out[i], colors='black', vmin=0.95, vmax=1, levels=sig_levs, + hatches=['......', '......'], alpha=0.0) + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina - Nino', fontsize=10) + + # Add colorbar # + + oticks = [-100, -50, -25, -10, -6, -2, 2, 6, 10, 25, 50, 100] + cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) + cbar = fig.colorbar(cs, cax=cb_ax, ticks=oticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=6, width=1) + cbar.ax.set_xticklabels(oticks, weight='normal') + + plt.subplots_adjust(top=0.86, bottom=0.16, hspace=0.5, wspace=0.55, left=0.08, right=0.95) + return fig, ax + + ################################################################################################## ################################################################################################## ################################################################################################## - - -def enso_slp(ps,negative_indices,positive_indices,titles,plot_months,projection,axes): - - r""" Compute the sea level pressure response to ENSO. 
Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during La Nina years, - El Nino years, and then their difference is shown. Stippling is used to denote statistical - significance on the La Nina minus El Nino composites. - - Parameters - ---------- - ps : xarray.DataArray - The sea level pressure. - - negative_indices : list - A list of La Nina years. - - positive_indices : list - A list of El Nino years. - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - projection : ccrs.NorthPolarStereo() or ccrs.SouthPolarStereo() - - axes : A list of numbers used to set the longitude and latitude bounds of the subplots. - [-180, 180, 20, 90] are used for the N. hemisphere and [-180, 180, -90, -20] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of La Nina ps anomalies (top row), El Nino ps anomalies (middle), - and their difference (bottom row) with stippling highlighting differences between El Nino - and La Nina winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field ps is assumed to have dimensions named "lat" and "lon". - E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','longitude':'lon'}) - - The input field ps is expected to have units of hPa. Directly below, the code - will check to see if the units are Pa instead, and if they are, convert them to hPa. - """ - - if getattr(ps,'units') == 'Pa': - print(f'**Converting pressure levels to hPa') - ps.attrs['units'] = 'hPa' - ps.values[:] = ps.values/100. - - print (np.nanmin(ps.values)) - print (np.nanmedian(ps.values)) - print (np.nanmean(ps.values)) - print (np.nanmax(ps.values)) - - nina_out = [] - nino_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = ps.sel(time=ps.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(nina_tmp.values,nino_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(nina_tmp.mean('time').values,nino_tmp.mean('time').values)) - nina_out.append(nina_tmp.mean('time').values) - nino_out.append(nino_tmp.mean('time').values) - - clim.close() - anom.close() - nina_tmp.close() - nino_tmp.close() - ps.close() - - nina_out = np.array(nina_out) - nino_out = np.array(nino_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - vmin = -10 - vmax = 10 - vlevs = np.linspace(vmin,vmax,num=21) - vlevs = [v for v in vlevs if v != 0] - ticks = [vmin,vmin/2,0,vmax/2,vmax] - - cmin = 900 - cmax = 1100 - clevs = 
np.linspace(cmin,cmax,num=21)
-
-    plt.suptitle('Nina - Nino sea level pressure (hPa)',fontsize=12,fontweight='normal')
-
-    # Add colormap #
-
-    from palettable.colorbrewer.diverging import RdBu_11
-    cmap1=RdBu_11.mpl_colormap.reversed()
-
-    lons = ps.lon.values
-    lats = ps.lat.values
-
-    cols = [0,1,2,3,4]
-
-    for i in cols:
-
-        print (i)
-
-        ########
-        # Nina #
-        ########
-        ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i]), projection=projection)
-        ax1.set_extent(axes, ccrs.PlateCarree())
-
-        plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal')
-
-        # Plot style features #
-        ax1.coastlines(linewidth=0.25)
-        theta = np.linspace(0, 2*np.pi, 100)
-        center, radius = [0.5, 0.5], 0.5
-        verts = np.vstack([np.sin(theta), np.cos(theta)]).T
-        circle = mpath.Path(verts * radius + center)
-        ax1.set_boundary(circle, transform=ax1.transAxes)
-        pos1 = ax1.get_position()
-        plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98)
-        cyclic_z, cyclic_lon = add_cyclic_point(nina_out[i], coord=lons)
+def enso_slp(ps, negative_indices, positive_indices, titles, plot_months, projection, axes):
+    r""" Compute the sea level pressure response to ENSO. Anomalies are defined as
+    deviations from the seasonal cycle and composites of anomalies are made during La Nina years,
+    El Nino years, and then their difference is shown. Stippling is used to denote statistical
+    significance on the La Nina minus El Nino composites.
+
+    Parameters
+    ----------
+    ps : xarray.DataArray
+        The sea level pressure.
+
+    negative_indices : list
+        A list of La Nina years.
+
+    positive_indices : list
+        A list of El Nino years.
+
+    titles : list of strings
+        A list of month names that will be used as the titles for each column of subplots
+
+    plot_months : list
+        A list of numbers corresponding to each month (e.g., 10 = October)
+
+    projection : ccrs.NorthPolarStereo() or ccrs.SouthPolarStereo()
+
+    axes : A list of numbers used to set the longitude and latitude bounds of the subplots.
+        [-180, 180, 20, 90] are used for the N. hemisphere and [-180, 180, -90, -20] for S. hemisphere
+
+    Returns
+    -------
+    3 row by 5 column plot of La Nina ps anomalies (top row), El Nino ps anomalies (middle),
+    and their difference (bottom row) with stippling highlighting differences between the El Nino
+    and La Nina sea level pressures statistically significant at the 95% level using a two-sided t-test.
+
+    Notes
+    -----
+    The input field ps is assumed to have dimensions named "lat" and "lon".
+    E.g., if your data has dimensions "latitude" and/or "longitude",
+    use the rename method:
+    ds.rename({'latitude':'lat','longitude':'lon'})
+
+    The input field ps is expected to have units of hPa. Directly below, the code
+    will check to see if the units are Pa instead, and if they are, convert them to hPa.
+    """
+
+    if getattr(ps, 'units') == 'Pa':
+        print('**Converting sea level pressure from Pa to hPa')
+        ps.attrs['units'] = 'hPa'
+        ps.values[:] = ps.values / 100.
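+
+    # Editor's note, a hedged sketch rather than part of the POD: getattr(ps, 'units') raises
+    # AttributeError if the incoming DataArray carries no 'units' attribute at all. Because
+    # xarray stores attributes in the plain dict ps.attrs, a dict-style lookup with a default
+    # avoids that failure mode. Treating absent units as already being hPa is an assumption
+    # made here for illustration only, not something the POD specifies:
+    #
+    #     if ps.attrs.get('units', 'hPa') == 'Pa':
+    #         print('**Converting sea level pressure from Pa to hPa')
+    #         ps.attrs['units'] = 'hPa'
+    #         ps.values[:] = ps.values / 100.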
+ + print(np.nanmin(ps.values)) + print(np.nanmedian(ps.values)) + print(np.nanmean(ps.values)) + print(np.nanmax(ps.values)) + + nina_out = [] + nino_out = [] + diff_out = [] + sigs_out = [] + clim_out = [] + + for mon in plot_months: + + clim = ps.sel(time=ps.time.dt.month.isin([mon])) + clim_out.append(clim.mean('time').values) + + anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") + + if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: + tmp_negative_indices = np.add(negative_indices, 0) + tmp_positive_indices = np.add(positive_indices, 0) + if mon == 1 or mon == 2 or mon == 3: + tmp_negative_indices = np.add(negative_indices, 1) + tmp_positive_indices = np.add(positive_indices, 1) + + nina_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) + nino_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) + + t, p = stats.ttest_ind(nina_tmp.values, nino_tmp.values, axis=0, nan_policy='omit') + + sigs_out.append(np.subtract(1, p)) + diff_out.append(np.subtract(nina_tmp.mean('time').values, nino_tmp.mean('time').values)) + nina_out.append(nina_tmp.mean('time').values) + nino_out.append(nino_tmp.mean('time').values) + + clim.close() + anom.close() + nina_tmp.close() + nino_tmp.close() + ps.close() + + nina_out = np.array(nina_out) + nino_out = np.array(nino_out) + diff_out = np.array(diff_out) + sigs_out = np.array(sigs_out) + clim_out = np.array(clim_out) + + # ############ Begin the plotting ############ + + fig, ax = plt.subplots() + + mpl.rcParams['font.sans-serif'].insert(0, 'Arial') + + vmin = -10 + vmax = 10 + vlevs = np.linspace(vmin, vmax, num=21) + vlevs = [v for v in vlevs if v != 0] + ticks = [vmin, vmin / 2, 0, vmax / 2, vmax] - # Plot anomalies # + cmin = 900 + cmax = 1100 + clevs = np.linspace(cmin, cmax, num=21) - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) + plt.suptitle('Nina - Nino sea level pressure (hPa)', fontsize=12, fontweight='normal') - # Overlay the climatology # + # Add colormap # - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) + from palettable.colorbrewer.diverging import RdBu_11 + cmap1 = RdBu_11.mpl_colormap.reversed() - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') + lons = ps.lon.values + lats = ps.lat.values - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nina (%s seasons)' % int(len(negative_indices)),fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - ax2.get_yaxis().set_ticks([]) + cols = [0, 1, 2, 3, 4] - ######## - # Nino # - ######## + for i in cols: + + print(i) + + ######## + # Nina # + ######## - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i]), projection=projection) - ax1.set_extent(axes, ccrs.PlateCarree()) + ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) - # Plot style features # + plt.title('%s' % titles[i], fontsize=10, y=0.93, 
fontweight='normal') - ax1.coastlines(linewidth=0.25) - theta = np.linspace(0, 2*np.pi, 100) - center, radius = [0.5, 0.5], 0.5 - verts = np.vstack([np.sin(theta), np.cos(theta)]).T - circle = mpath.Path(verts * radius + center) - ax1.set_boundary(circle, transform=ax1.transAxes) - pos1 = ax1.get_position() - plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98) - cyclic_z, cyclic_lon = add_cyclic_point(nino_out[i], coord=lons) + # Plot style features # - # Plot anomalies # + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(nina_out[i], coord=lons) - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) + # Plot anomalies # - # Overlay the climatology # + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) + # Overlay the climatology # - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)),fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - ax2.get_yaxis().set_ticks([]) + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') - ############## - # Difference # - ############## + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina (%s seasons)' % int(len(negative_indices)), fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + ######## + # Nino # + ######## + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) + + # Plot style features # + + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], 
fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(nino_out[i], coord=lons) - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i]), projection=projection) - ax1.set_extent(axes, ccrs.PlateCarree()) - - # Plot style features # - - ax1.coastlines(linewidth=0.25) - theta = np.linspace(0, 2*np.pi, 100) - center, radius = [0.5, 0.5], 0.5 - verts = np.vstack([np.sin(theta), np.cos(theta)]).T - circle = mpath.Path(verts * radius + center) - ax1.set_boundary(circle, transform=ax1.transAxes) - pos1 = ax1.get_position() - plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98) - cyclic_z, cyclic_lon = add_cyclic_point(diff_out[i], coord=lons) - - # Plot anomalies # - - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) - - # Statistical significance # - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - cyclic_sig, cyclic_lontmp = add_cyclic_point(sigs_out[i], coord=lons) - ax1.contourf(cyclic_lon, lats, cyclic_sig,transform=ccrs.PlateCarree(),colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0,zorder=2) - - # Overlay the climatology # - - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) - - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('Nina - Nino',fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - ax2.get_yaxis().set_ticks([]) - - # Add colorbar # + # Plot anomalies # - cb_ax = fig.add_axes([0.35, 0.05, 0.30, 0.015]) - cbar = fig.colorbar(contourf, cax=cb_ax, ticks=ticks,orientation='horizontal') - cbar.ax.tick_params(labelsize=8, width=1) - cbar.ax.set_xticklabels(ticks,weight='normal') + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) + + # Overlay the climatology # + + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') - plt.subplots_adjust(top=0.86,bottom=0.09,hspace=0.3,wspace=0.0,left=0.02,right=0.94) - - return fig, ax \ No newline at end of file + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nino (%s seasons)' % int(len(positive_indices)), fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + ############## + # Difference # + ############## + + ax1 = 
plt.subplot2grid(shape=(3, 5), loc=(2, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) + + # Plot style features # + + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(diff_out[i], coord=lons) + + # Plot anomalies # + + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) + + # Statistical significance # + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + cyclic_sig, cyclic_lontmp = add_cyclic_point(sigs_out[i], coord=lons) + ax1.contourf(cyclic_lon, lats, cyclic_sig, transform=ccrs.PlateCarree(), colors='black', vmin=0.95, vmax=1, + levels=sig_levs, hatches=['......', '......'], alpha=0.0, zorder=2) + + # Overlay the climatology # + + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) + + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('Nina - Nino', fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + # Add colorbar # + + cb_ax = fig.add_axes([0.35, 0.05, 0.30, 0.015]) + cbar = fig.colorbar(contourf, cax=cb_ax, ticks=ticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=8, width=1) + cbar.ax.set_xticklabels(ticks, weight='normal') + + plt.subplots_adjust(top=0.86, bottom=0.09, hspace=0.3, wspace=0.0, left=0.02, right=0.94) + + return fig, ax diff --git a/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeqbo.py b/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeqbo.py index 7b311821a..c90bb23b0 100644 --- a/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeqbo.py +++ b/diagnostics/stc_qbo_enso/stc_qbo_enso_plottingcodeqbo.py @@ -1,11 +1,11 @@ -''' +""" This module contains functions used in the Stratospheric QBO and ENSO POD. 
Contains: qbo_slp: plots sea level pressure response to QBO as a function of month and QBO phase qbo_uzm: plots the zonal-mean zonal wind response to QBO as a function of month and QBO phase qbo_vt: plots the zonally averaged eddy heat flux response to the QBO as a function of month and QBO phase -''' +""" import numpy as np import matplotlib as mpl @@ -16,759 +16,787 @@ from scipy import stats + ################################################################################################## ################################################################################################## ################################################################################################## -def qbo_uzm(uzm,negative_indices,positive_indices,QBOisobar,titles,plot_months,axes): - - r""" Compute the zonal mean zonal wind response to the QBO. Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during easterly QBO, - westerly QBO, and then their difference is shown. Stippling is used to denote statistical - significance on the EQBO minus WQBO composites. - - Parameters - ---------- - uzm : xarray.DataArray - The zonal mean zonal wind. - - negative_indices : list - A list of easterly QBO years. - - positive_indices : list - A list of westerly QBO years. - - QBOisobar : int - An integer defined by the user in the config.jsonc file specifying what isobar - is used to index the QBO - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - axes : A list of numbers used to set the pressure and latitude limits of the subplots. - [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of easterly QBO uzm anomalies (top row), westerly QBO uzm - anomalies (middle), and their difference (bottom row) with stippling highlighting differences - between EQBO and WQBO winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field uzm is assumed to have dimensions named "lat" and "lev". 
- E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','level':'lev'}) - """ - - eqbo_out = [] - wqbo_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = uzm.sel(time=uzm.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(eqbo_tmp.values,wqbo_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(eqbo_tmp.mean('time').values,wqbo_tmp.mean('time').values)) - eqbo_out.append(eqbo_tmp.mean('time').values) - wqbo_out.append(wqbo_tmp.mean('time').values) - - clim.close() - anom.close() - eqbo_tmp.close() - wqbo_tmp.close() - uzm.close() - - eqbo_out = np.array(eqbo_out) - wqbo_out = np.array(wqbo_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - vmin = -10 - vmax = 10 - vlevs = np.linspace(vmin,vmax,num=21) - vlevs = [v for v in vlevs if v != 0] - ticks = [vmin,vmin/2,0,vmax/2,vmax] - - cmin = -200 - cmax = 200 - clevs = np.linspace(cmin,cmax,num=41) - clevs = [v for v in clevs if v != 0] - - plt.suptitle('QBO (5S-5N index @ %s hPa) zonal-mean zonal wind (m/s)' % int(QBOisobar),fontsize=12,fontweight='normal') - - # Add colormap # - from palettable.colorbrewer.diverging import RdBu_11 - cmap1=RdBu_11.mpl_colormap.reversed() - - x, y = np.meshgrid(uzm.lat.values,uzm.lev.values) - - cols = [0,1,2,3,4] - - for i in cols: - - # eqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i])) - plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal') - - cs = plt.contourf(x,y,eqbo_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)),fontsize=10) - - # wqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i])) - - cs = plt.contourf(x,y,wqbo_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 
3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)),fontsize=10) - - # Diff: eqbo minus wqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i])) - - cs = plt.contourf(x,y,diff_out[i],cmap=cmap1,levels=vlevs,extend="both",vmin=vmin,vmax=vmax,zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - plt.xlabel('Latitude',fontsize=8,fontweight='normal') - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - plt.contourf(x,y,sigs_out[i],colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0) - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo - wqbo',fontsize=10) - - # Add colorbar # - - cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) - cbar = fig.colorbar(cs, cax=cb_ax, ticks=ticks,orientation='horizontal') - cbar.ax.tick_params(labelsize=8, width=1) - cbar.ax.set_xticklabels(ticks,weight='normal') - - plt.subplots_adjust(top=0.86,bottom=0.16,hspace=0.5,wspace=0.55,left=0.08,right=0.95) - return fig, ax + +def qbo_uzm(uzm, negative_indices, positive_indices, QBOisobar, titles, plot_months, axes): + r""" Compute the zonal mean zonal wind response to the QBO. Anomalies are defined as + deviations from the seasonal cycle and composites of anomalies are made during easterly QBO, + westerly QBO, and then their difference is shown. Stippling is used to denote statistical + significance on the EQBO minus WQBO composites. + + Parameters + ---------- + uzm : xarray.DataArray + The zonal mean zonal wind. + + negative_indices : list + A list of easterly QBO years. + + positive_indices : list + A list of westerly QBO years. + + QBOisobar : int + An integer defined by the user in the config.jsonc file specifying what isobar + is used to index the QBO + + titles : list of strings + A list of month names that will be used as the titles for each column of subplots + + plot_months : list + A list of numbers corresponding to each month (e.g., 10 = october) + + axes : A list of numbers used to set the pressure and latitude limits of the subplots. + [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. 
hemisphere
+
+    Returns
+    -------
+    3 row by 5 column plot of easterly QBO uzm anomalies (top row), westerly QBO uzm
+    anomalies (middle), and their difference (bottom row) with stippling highlighting differences
+    between EQBO and WQBO winds statistically significant at the 95% level using a two-sided t-test.
+
+    Notes
+    -----
+    The input field uzm is assumed to have dimensions named "lat" and "lev".
+    E.g., if your data has dimensions "latitude" and/or "level",
+    use the rename method:
+    ds.rename({'latitude':'lat','level':'lev'})
+    """
+
+    eqbo_out = []
+    wqbo_out = []
+    diff_out = []
+    sigs_out = []
+    clim_out = []
+
+    for mon in plot_months:
+
+        clim = uzm.sel(time=uzm.time.dt.month.isin([mon]))
+        clim_out.append(clim.mean('time').values)
+
+        anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time")
+
+        if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12:
+            tmp_negative_indices = np.add(negative_indices, 0)
+            tmp_positive_indices = np.add(positive_indices, 0)
+        if mon == 1 or mon == 2 or mon == 3:
+            tmp_negative_indices = np.add(negative_indices, 1)
+            tmp_positive_indices = np.add(positive_indices, 1)
+
+        eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices]))
+        wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices]))
+
+        t, p = stats.ttest_ind(eqbo_tmp.values, wqbo_tmp.values, axis=0, nan_policy='omit')
+
+        sigs_out.append(np.subtract(1, p))
+        diff_out.append(np.subtract(eqbo_tmp.mean('time').values, wqbo_tmp.mean('time').values))
+        eqbo_out.append(eqbo_tmp.mean('time').values)
+        wqbo_out.append(wqbo_tmp.mean('time').values)
+
+        clim.close()
+        anom.close()
+        eqbo_tmp.close()
+        wqbo_tmp.close()
+        uzm.close()
+
+    eqbo_out = np.array(eqbo_out)
+    wqbo_out = np.array(wqbo_out)
+    diff_out = np.array(diff_out)
+    sigs_out = np.array(sigs_out)
+    clim_out = np.array(clim_out)
+
+    # ############ Begin the plotting ############
+
+    fig, ax = plt.subplots()
+
+    mpl.rcParams['font.sans-serif'].insert(0, 'Arial')
+
+    vmin = -10
+    vmax = 10
+    vlevs = np.linspace(vmin, vmax, num=21)
+    vlevs = [v for v in vlevs if v != 0]
+    ticks = [vmin, vmin / 2, 0, vmax / 2, vmax]
+
+    cmin = -200
+    cmax = 200
+    clevs = np.linspace(cmin, cmax, num=41)
+    clevs = [v for v in clevs if v != 0]
+
+    plt.suptitle('QBO (5S-5N index @ %s hPa) zonal-mean zonal wind (m/s)' % int(QBOisobar), fontsize=12,
+                 fontweight='normal')
+
+    # Add colormap #
+    from palettable.colorbrewer.diverging import RdBu_11
+    cmap1 = RdBu_11.mpl_colormap.reversed()
+
+    x, y = np.meshgrid(uzm.lat.values, uzm.lev.values)
+
+    cols = [0, 1, 2, 3, 4]
+
+    for i in cols:
+
+        # eqbo #
+
+        ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i]))
+        plt.title('%s' % titles[i], fontsize=10, y=0.93, fontweight='normal')
+
+        cs = plt.contourf(x, y, eqbo_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax,
+                          zorder=1)
+
+        mpl.rcParams["lines.linewidth"] = 0.2
+        mpl.rcParams["lines.dashed_pattern"] = 10, 3
+        black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax,
+                            zorder=3)
+        plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1)
+
+        plt.semilogy()
+        yticks = [1, 5, 10, 100, 1000]
+        plt.yticks(yticks, yticks, fontsize=6, fontweight='normal')
+        xticks = [-90, -60, -30, 0, 30, 60, 90]
+        plt.xticks(xticks, xticks, fontsize=6, fontweight='normal')
+        plt.gca().invert_yaxis()
+        plt.axis(axes)
+        if i == 0:
+            plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal')
+
+        if i == 4:
+            ax2 =
ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)), fontsize=10) + + # wqbo # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i])) + + cs = plt.contourf(x, y, wqbo_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax, + zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 5, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)), fontsize=10) + + # Diff: eqbo minus wqbo # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(2, cols[i])) + + cs = plt.contourf(x, y, diff_out[i], cmap=cmap1, levels=vlevs, extend="both", vmin=vmin, vmax=vmax, + zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 5, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + plt.xlabel('Latitude', fontsize=8, fontweight='normal') + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + plt.contourf(x, y, sigs_out[i], colors='black', vmin=0.95, vmax=1, levels=sig_levs, + hatches=['......', '......'], alpha=0.0) + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo - wqbo', fontsize=10) + + # Add colorbar # + + cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) + cbar = fig.colorbar(cs, cax=cb_ax, ticks=ticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=8, width=1) + cbar.ax.set_xticklabels(ticks, weight='normal') + + plt.subplots_adjust(top=0.86, bottom=0.16, hspace=0.5, wspace=0.55, left=0.08, right=0.95) + return fig, ax + ################################################################################################## ################################################################################################## ################################################################################################## -def qbo_vt(vt,negative_indices,positive_indices,QBOisobar,titles,plot_months,axes): - - r""" Compute the zonal mean eddy heat flux response to the QBO. 
Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during easterly QBO, - westerly QBO, and then their difference is shown. Stippling is used to denote statistical - significance on the EQBO minus WQBO composites. - - Parameters - ---------- - vt : xarray.DataArray - The zonal mean eddy heat flux. This quantity is calculated using the - compute_total_eddy_heat_flux function given in the driver script stc_qbo_enso.py - - negative_indices : list - A list of easterly QBO years. - - positive_indices : list - A list of westerly QBO years. - - QBOisobar : int - An integer defined by the user in the config.jsonc file specifying what isobar - is used to index the QBO - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - axes : A list of numbers used to set the pressure and latitude limits of the subplots. - [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of easterly QBO vt anomalies (top row), westerly QBO vt - anomalies (middle), and their difference (bottom row) with stippling highlighting differences - between EQBO and WQBO winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field vt is assumed to have dimensions named "lat" and "lev". - E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','level':'lev'}) - """ - - eqbo_out = [] - wqbo_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = vt.sel(time=vt.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - # For NH, QBO index is based on Oct-Nov year. The plot will show Oct-Feb. Note that Jan-Feb are selected using QBO index year + 1 - # For SH, QBO index is based on Jul-Aug year. The plot will show Jul-Nov. 
- - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(eqbo_tmp.values,wqbo_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(eqbo_tmp.mean('time').values,wqbo_tmp.mean('time').values)) - eqbo_out.append(eqbo_tmp.mean('time').values) - wqbo_out.append(wqbo_tmp.mean('time').values) - - clim.close() - anom.close() - eqbo_tmp.close() - wqbo_tmp.close() - vt.close() - - eqbo_out = np.array(eqbo_out) - wqbo_out = np.array(wqbo_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - blevs = [] - - blevs.append(-2) - blevs.append(-6) - blevs.append(-10) - blevs.append(-25) - blevs.append(-50) - blevs.append(-100) - - blevs.append(2) - blevs.append(6) - blevs.append(10) - blevs.append(25) - blevs.append(50) - blevs.append(100) - - blevs = np.sort(blevs) - print (blevs) - - cmin = -200 - cmax = 200 - clevs = np.linspace(cmin,cmax,num=41) - clevs = [v for v in clevs if v != 0] - - plt.suptitle('QBO (5S-5N index @ %s hPa) zonal-mean eddy heat flux (Km/s)' % int(QBOisobar),fontsize=12,fontweight='normal') - - # Add colormap # - from palettable.colorbrewer.diverging import RdBu_11 - cmap1=RdBu_11.mpl_colormap.reversed() - - x, y = np.meshgrid(vt.lat.values,vt.lev.values) - - cols = [0,1,2,3,4] - - for i in cols: - - print (i) - - # eqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i])) - plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal') - - cs = plt.contourf(x,y,eqbo_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)),fontsize=10) - - # wqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i])) - - cs = plt.contourf(x,y,wqbo_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - 
plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)),fontsize=10) - - # Diff: eqbo minus wqbo # - - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i])) - - cs = plt.contourf(x,y,diff_out[i],blevs,norm = mpl.colors.SymLogNorm(linthresh=2,linscale=1,vmin=-100,vmax=100),cmap=cmap1,extend="both",zorder=1) - - mpl.rcParams["lines.linewidth"] = 0.2 - mpl.rcParams["lines.dashed_pattern"] = 10, 3 - black = plt.contour(x,y,clim_out[i],colors='k',levels=clevs,extend="both",vmin=cmin,vmax=cmax,zorder=3) - plt.clabel(black,black.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - - plt.semilogy() - yticks = [1,5,10,100,1000] - plt.yticks(yticks,yticks,fontsize=6,fontweight='normal') - xticks = [-90,-60,-30,0,30,60,90] - plt.xticks(xticks,xticks,fontsize=6,fontweight='normal') - plt.gca().invert_yaxis() - plt.axis(axes) - if i == 0: - plt.ylabel('Pressure (hPa)',fontsize=8,fontweight='normal') - plt.xlabel('Latitude',fontsize=8,fontweight='normal') - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - plt.contourf(x,y,sigs_out[i],colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0) - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo - wqbo',fontsize=10) - - # Add colorbar # - - oticks = [-100,-50,-25,-10,-6,-2,2,6,10,25,50,100] - cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) - cbar = fig.colorbar(cs, cax=cb_ax, ticks=oticks,orientation='horizontal') - cbar.ax.tick_params(labelsize=8, width=1) - cbar.ax.set_xticklabels(oticks,weight='normal') - - plt.subplots_adjust(top=0.86,bottom=0.16,hspace=0.5,wspace=0.55,left=0.08,right=0.95) - - return fig, ax +def qbo_vt(vt, negative_indices, positive_indices, QBOisobar, titles, plot_months, axes): + r""" Compute the zonal mean eddy heat flux response to the QBO. Anomalies are defined as + deviations from the seasonal cycle and composites of anomalies are made during easterly QBO, + westerly QBO, and then their difference is shown. Stippling is used to denote statistical + significance on the EQBO minus WQBO composites. + + Parameters + ---------- + vt : xarray.DataArray + The zonal mean eddy heat flux. This quantity is calculated using the + compute_total_eddy_heat_flux function given in the driver script stc_qbo_enso.py + + negative_indices : list + A list of easterly QBO years. + + positive_indices : list + A list of westerly QBO years. + + QBOisobar : int + An integer defined by the user in the config.jsonc file specifying what isobar + is used to index the QBO + + titles : list of strings + A list of month names that will be used as the titles for each column of subplots + + plot_months : list + A list of numbers corresponding to each month (e.g., 10 = october) + + axes : A list of numbers used to set the pressure and latitude limits of the subplots. 
+        [0,90,1000,1] are used for the N. hemisphere and [-90,0,1000,1] for S. hemisphere
+
+    Returns
+    -------
+    3 row by 5 column plot of easterly QBO vt anomalies (top row), westerly QBO vt
+    anomalies (middle), and their difference (bottom row) with stippling highlighting differences
+    between the EQBO and WQBO heat fluxes statistically significant at the 95% level using a two-sided t-test.
+
+    Notes
+    -----
+    The input field vt is assumed to have dimensions named "lat" and "lev".
+    E.g., if your data has dimensions "latitude" and/or "level",
+    use the rename method:
+    ds.rename({'latitude':'lat','level':'lev'})
+    """
+
+    eqbo_out = []
+    wqbo_out = []
+    diff_out = []
+    sigs_out = []
+    clim_out = []
+
+    for mon in plot_months:
+
+        clim = vt.sel(time=vt.time.dt.month.isin([mon]))
+        clim_out.append(clim.mean('time').values)
+
+        anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time")
+
+        # For NH, QBO index is based on Oct-Nov year. The plot will show Oct-Feb. Note that Jan-Feb are selected
+        # using QBO index year + 1
+        # For SH, QBO index is based on Jul-Aug year. The plot will show Jul-Nov.
+
+        if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12:
+            tmp_negative_indices = np.add(negative_indices, 0)
+            tmp_positive_indices = np.add(positive_indices, 0)
+        if mon == 1 or mon == 2 or mon == 3:
+            tmp_negative_indices = np.add(negative_indices, 1)
+            tmp_positive_indices = np.add(positive_indices, 1)
+
+        eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices]))
+        wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices]))
+
+        t, p = stats.ttest_ind(eqbo_tmp.values, wqbo_tmp.values, axis=0, nan_policy='omit')
+
+        sigs_out.append(np.subtract(1, p))
+        diff_out.append(np.subtract(eqbo_tmp.mean('time').values, wqbo_tmp.mean('time').values))
+        eqbo_out.append(eqbo_tmp.mean('time').values)
+        wqbo_out.append(wqbo_tmp.mean('time').values)
+
+        clim.close()
+        anom.close()
+        eqbo_tmp.close()
+        wqbo_tmp.close()
+        vt.close()
+
+    eqbo_out = np.array(eqbo_out)
+    wqbo_out = np.array(wqbo_out)
+    diff_out = np.array(diff_out)
+    sigs_out = np.array(sigs_out)
+    clim_out = np.array(clim_out)
+
+    # ############ Begin the plotting ############
+
+    fig, ax = plt.subplots()
+
+    mpl.rcParams['font.sans-serif'].insert(0, 'Arial')
+
+    blevs = []
+
+    blevs.append(-2)
+    blevs.append(-6)
+    blevs.append(-10)
+    blevs.append(-25)
+    blevs.append(-50)
+    blevs.append(-100)
+
+    blevs.append(2)
+    blevs.append(6)
+    blevs.append(10)
+    blevs.append(25)
+    blevs.append(50)
+    blevs.append(100)
+
+    blevs = np.sort(blevs)
+    print(blevs)
+
+    cmin = -200
+    cmax = 200
+    clevs = np.linspace(cmin, cmax, num=41)
+    clevs = [v for v in clevs if v != 0]
+
+    plt.suptitle('QBO (5S-5N index @ %s hPa) zonal-mean eddy heat flux (Km/s)' % int(QBOisobar), fontsize=12,
+                 fontweight='normal')
+
+    # Add colormap #
+    from palettable.colorbrewer.diverging import RdBu_11
+    cmap1 = RdBu_11.mpl_colormap.reversed()
+
+    x, y = np.meshgrid(vt.lat.values, vt.lev.values)
+
+    cols = [0, 1, 2, 3, 4]
+
+    for i in cols:
+
+        print(i)
+
+        # eqbo #
+
+        ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i]))
+        plt.title('%s' % titles[i], fontsize=10, y=0.93, fontweight='normal')
+
+        cs = plt.contourf(x, y, eqbo_out[i], blevs,
+                          norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1,
+                          extend="both", zorder=1)
+
+        mpl.rcParams["lines.linewidth"] = 0.2
+        mpl.rcParams["lines.dashed_pattern"] = 10, 3
+        black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin,
vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 5, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)), fontsize=10) + + # wqbo # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i])) + + cs = plt.contourf(x, y, wqbo_out[i], blevs, + norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1, + extend="both", zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 5, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)), fontsize=10) + + # Diff: eqbo minus wqbo # + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(2, cols[i])) + + cs = plt.contourf(x, y, diff_out[i], blevs, + norm=mpl.colors.SymLogNorm(linthresh=2, linscale=1, vmin=-100, vmax=100), cmap=cmap1, + extend="both", zorder=1) + + mpl.rcParams["lines.linewidth"] = 0.2 + mpl.rcParams["lines.dashed_pattern"] = 10, 3 + black = plt.contour(x, y, clim_out[i], colors='k', levels=clevs, extend="both", vmin=cmin, vmax=cmax, + zorder=3) + plt.clabel(black, black.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + + plt.semilogy() + yticks = [1, 5, 10, 100, 1000] + plt.yticks(yticks, yticks, fontsize=6, fontweight='normal') + xticks = [-90, -60, -30, 0, 30, 60, 90] + plt.xticks(xticks, xticks, fontsize=6, fontweight='normal') + plt.gca().invert_yaxis() + plt.axis(axes) + if i == 0: + plt.ylabel('Pressure (hPa)', fontsize=8, fontweight='normal') + plt.xlabel('Latitude', fontsize=8, fontweight='normal') + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + plt.contourf(x, y, sigs_out[i], colors='black', vmin=0.95, vmax=1, levels=sig_levs, + hatches=['......', '......'], alpha=0.0) + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo - wqbo', fontsize=10) + + # Add colorbar # + + oticks = [-100, -50, -25, -10, -6, -2, 2, 6, 10, 25, 50, 100] + cb_ax = fig.add_axes([0.365, 0.04, 0.30, 0.015]) + cbar = fig.colorbar(cs, cax=cb_ax, ticks=oticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=8, width=1) + cbar.ax.set_xticklabels(oticks, weight='normal') + + 
plt.subplots_adjust(top=0.86, bottom=0.16, hspace=0.5, wspace=0.55, left=0.08, right=0.95) + + return fig, ax + + ################################################################################################## ################################################################################################## ################################################################################################## -def qbo_slp(ps,negative_indices,positive_indices,QBOisobar,titles,plot_months,projection,axes): - - r""" Compute the sea level pressure response to QBO. Anomalies are defined as - deviations from the seasonal cycle and composites of anomalies are made during easterly QBO years, - westerly QBO years, and then their difference is shown. Stippling is used to denote statistical - significance on the EQBO minus WQBO composites. - - Parameters - ---------- - ps : xarray.DataArray - The sea level pressure. - - negative_indices : list - A list of easterly QBO years. - - positive_indices : list - A list of westerly QBO years. - - QBOisobar : int - An integer defined by the user in the config.jsonc file specifying what isobar - is used to index the QBO - - titles : list of strings - A list of month names that will be used as the titles for each column of subplots - - plot_months : list - A list of numbers corresponding to each month (e.g., 10 = october) - - projection : ccrs.NorthPolarStereo() or ccrs.SouthPolarStereo() - - axes : A list of numbers used to set the longitude and latitude bounds of the subplots. - [-180, 180, 20, 90] are used for the N. hemisphere and [-180, 180, -90, -20] for S. hemisphere - - Returns - ------- - 3 row by 6 column plot of easterly QBO ps anomalies (top row), westerly QBO ps anomalies (middle), - and their difference (bottom row) with stippling highlighting differences between EQBO - and WQBO winds statistically significant at the 95% level using a two-sided t-test. - - Notes - ----- - The input field ps is assumed to have dimensions named "lat" and "lon". - E.g., if your data has dimensions "latitude" and/or "level", - use the rename method: - ds.rename({'latitude':'lat','longitude':'lon'}) - - The input field ps is expected to have units of hPa. Directly below, the code - will check to see if the units are Pa instead, and if they are, convert them to hPa. - """ - - if getattr(ps,'units') == 'Pa': - print(f'**Converting pressure levels to hPa') - ps.attrs['units'] = 'hPa' - ps.values[:] = ps.values/100. 
- - print (np.nanmin(ps.values)) - print (np.nanmedian(ps.values)) - print (np.nanmean(ps.values)) - print (np.nanmax(ps.values)) - - eqbo_out = [] - wqbo_out = [] - diff_out = [] - sigs_out = [] - clim_out = [] - - for mon in plot_months: - - clim = ps.sel(time=ps.time.dt.month.isin([mon])) - clim_out.append(clim.mean('time').values) - - anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") - - if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: - tmp_negative_indices = np.add(negative_indices,0) - tmp_positive_indices = np.add(positive_indices,0) - if mon == 1 or mon == 2 or mon == 3: - tmp_negative_indices = np.add(negative_indices,1) - tmp_positive_indices = np.add(positive_indices,1) - - eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) - wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) - - t,p = stats.ttest_ind(eqbo_tmp.values,wqbo_tmp.values,axis=0,nan_policy='omit') - - sigs_out.append(np.subtract(1,p)) - diff_out.append(np.subtract(eqbo_tmp.mean('time').values,wqbo_tmp.mean('time').values)) - eqbo_out.append(eqbo_tmp.mean('time').values) - wqbo_out.append(wqbo_tmp.mean('time').values) - - clim.close() - anom.close() - eqbo_tmp.close() - wqbo_tmp.close() - ps.close() - - eqbo_out = np.array(eqbo_out) - wqbo_out = np.array(wqbo_out) - diff_out = np.array(diff_out) - sigs_out = np.array(sigs_out) - clim_out = np.array(clim_out) - - ############# Begin the plotting ############ - - fig, ax = plt.subplots() - - mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - - vmin = -10 - vmax = 10 - vlevs = np.linspace(vmin,vmax,num=21) - vlevs = [v for v in vlevs if v != 0] - ticks = [vmin,vmin/2,0,vmax/2,vmax] - - cmin = 900 - cmax = 1100 - clevs = np.linspace(cmin,cmax,num=21) - - plt.suptitle('QBO (5S-5N index @ %s hPa) sea level pressure (hPa)' % QBOisobar,fontsize=12,fontweight='normal') - - # Add colormap # - - from palettable.colorbrewer.diverging import RdBu_11 - cmap1=RdBu_11.mpl_colormap.reversed() - - lons = ps.lon.values - lats = ps.lat.values - - cols = [0,1,2,3,4] - - for i in cols: - - print (i) - - ######## - # eqbo # - ######## - - ax1 = plt.subplot2grid(shape=(3,5), loc=(0,cols[i]), projection=projection) - ax1.set_extent(axes, ccrs.PlateCarree()) - plt.title('%s' % titles[i],fontsize=10,y=0.93,fontweight='normal') - - # Plot style features # - - ax1.coastlines(linewidth=0.25) - theta = np.linspace(0, 2*np.pi, 100) - center, radius = [0.5, 0.5], 0.5 - verts = np.vstack([np.sin(theta), np.cos(theta)]).T - circle = mpath.Path(verts * radius + center) - ax1.set_boundary(circle, transform=ax1.transAxes) - pos1 = ax1.get_position() - plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98) - cyclic_z, cyclic_lon = add_cyclic_point(eqbo_out[i], coord=lons) +def qbo_slp(ps, negative_indices, positive_indices, QBOisobar, titles, plot_months, projection, axes): + r""" Compute the sea level pressure response to QBO. Anomalies are defined as + deviations from the seasonal cycle and composites of anomalies are made during easterly QBO years, + westerly QBO years, and then their difference is shown. Stippling is used to denote statistical + significance on the EQBO minus WQBO composites. + + Parameters + ---------- + ps : xarray.DataArray + The sea level pressure. + + negative_indices : list + A list of easterly QBO years. + + positive_indices : list + A list of westerly QBO years. 
+ + QBOisobar : int + An integer defined by the user in the config.jsonc file specifying what isobar + is used to index the QBO + + titles : list of strings + A list of month names that will be used as the titles for each column of subplots + + plot_months : list + A list of numbers corresponding to each month (e.g., 10 = October) + + projection : ccrs.NorthPolarStereo() or ccrs.SouthPolarStereo() + + axes : A list of numbers used to set the longitude and latitude bounds of the subplots. + [-180, 180, 20, 90] are used for the N. hemisphere and [-180, 180, -90, -20] for S. hemisphere + + Returns + ------- + 3 row by 5 column plot of easterly QBO ps anomalies (top row), westerly QBO ps anomalies (middle), + and their difference (bottom row) with stippling highlighting differences between EQBO + and WQBO winds statistically significant at the 95% level using a two-sided t-test. + + Notes + ----- + The input field ps is assumed to have dimensions named "lat" and "lon". + E.g., if your data has dimensions "latitude" and/or "longitude", + use the rename method: + ds.rename({'latitude':'lat','longitude':'lon'}) + + The input field ps is expected to have units of hPa. Directly below, the code + will check to see if the units are Pa instead, and if they are, convert them to hPa. + """ + + if getattr(ps, 'units') == 'Pa': + print('**Converting sea level pressure from Pa to hPa') + ps.attrs['units'] = 'hPa' + ps.values[:] = ps.values / 100. + + print(np.nanmin(ps.values)) + print(np.nanmedian(ps.values)) + print(np.nanmean(ps.values)) + print(np.nanmax(ps.values)) + + eqbo_out = [] + wqbo_out = [] + diff_out = [] + sigs_out = [] + clim_out = [] + + for mon in plot_months: + + clim = ps.sel(time=ps.time.dt.month.isin([mon])) + clim_out.append(clim.mean('time').values) + + anom = clim.groupby("time.month") - clim.groupby("time.month").mean("time") + + if mon == 7 or mon == 8 or mon == 9 or mon == 10 or mon == 11 or mon == 12: + tmp_negative_indices = np.add(negative_indices, 0) + tmp_positive_indices = np.add(positive_indices, 0) + if mon == 1 or mon == 2 or mon == 3: + tmp_negative_indices = np.add(negative_indices, 1) + tmp_positive_indices = np.add(positive_indices, 1) + + eqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_negative_indices])) + wqbo_tmp = anom.sel(time=anom.time.dt.year.isin([tmp_positive_indices])) + + t, p = stats.ttest_ind(eqbo_tmp.values, wqbo_tmp.values, axis=0, nan_policy='omit') - # Plot anomalies # + sigs_out.append(np.subtract(1, p)) + diff_out.append(np.subtract(eqbo_tmp.mean('time').values, wqbo_tmp.mean('time').values)) + eqbo_out.append(eqbo_tmp.mean('time').values) + wqbo_out.append(wqbo_tmp.mean('time').values) - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) + clim.close() + anom.close() + eqbo_tmp.close() + wqbo_tmp.close() + ps.close() - # Overlay the climatology # + eqbo_out = np.array(eqbo_out) + wqbo_out = np.array(wqbo_out) + diff_out = np.array(diff_out) + sigs_out = np.array(sigs_out) + clim_out = np.array(clim_out) - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) + # ############ Begin the plotting ############ - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') + fig, ax = plt.subplots() - 
if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)),fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - ax2.get_yaxis().set_ticks([]) + mpl.rcParams['font.sans-serif'].insert(0, 'Arial') - ######## - # wqbo # - ######## + vmin = -10 + vmax = 10 + vlevs = np.linspace(vmin, vmax, num=21) + vlevs = [v for v in vlevs if v != 0] + ticks = [vmin, vmin / 2, 0, vmax / 2, vmax] - ax1 = plt.subplot2grid(shape=(3,5), loc=(1,cols[i]), projection=projection) - ax1.set_extent(axes, ccrs.PlateCarree()) + cmin = 900 + cmax = 1100 + clevs = np.linspace(cmin, cmax, num=21) - # Plot style features # + plt.suptitle('QBO (5S-5N index @ %s hPa) sea level pressure (hPa)' % QBOisobar, fontsize=12, fontweight='normal') - ax1.coastlines(linewidth=0.25) - theta = np.linspace(0, 2*np.pi, 100) - center, radius = [0.5, 0.5], 0.5 - verts = np.vstack([np.sin(theta), np.cos(theta)]).T - circle = mpath.Path(verts * radius + center) - ax1.set_boundary(circle, transform=ax1.transAxes) - pos1 = ax1.get_position() - plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98) - cyclic_z, cyclic_lon = add_cyclic_point(wqbo_out[i], coord=lons) + # Add colormap # - # Plot anomalies # + from palettable.colorbrewer.diverging import RdBu_11 + cmap1 = RdBu_11.mpl_colormap.reversed() - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) + lons = ps.lon.values + lats = ps.lat.values + + cols = [0, 1, 2, 3, 4] + + for i in cols: + + print(i) + + ######## + # eqbo # + ######## - # Overlay the climatology # + ax1 = plt.subplot2grid(shape=(3, 5), loc=(0, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) + plt.title('%s' % titles[i], fontsize=10, y=0.93, fontweight='normal') + + # Plot style features # + + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(eqbo_out[i], coord=lons) - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) + # Plot anomalies # - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)),fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - 
ax2.get_yaxis().set_ticks([]) + # Overlay the climatology # - ############## - # Difference # - ############## - - ax1 = plt.subplot2grid(shape=(3,5), loc=(2,cols[i]), projection=projection) - ax1.set_extent(axes, ccrs.PlateCarree()) - - # Plot style features # - - ax1.coastlines(linewidth=0.25) - theta = np.linspace(0, 2*np.pi, 100) - center, radius = [0.5, 0.5], 0.5 - verts = np.vstack([np.sin(theta), np.cos(theta)]).T - circle = mpath.Path(verts * radius + center) - ax1.set_boundary(circle, transform=ax1.transAxes) - pos1 = ax1.get_position() - plt.title("%s" % titles[i], fontsize=10,fontweight='normal',y=0.98) - cyclic_z, cyclic_lon = add_cyclic_point(diff_out[i], coord=lons) - - # Plot anomalies # - - contourf = ax1.contourf(cyclic_lon, lats, cyclic_z,transform=ccrs.PlateCarree(),cmap=cmap1,vmin=vmin,vmax=vmax,levels=vlevs,extend='both',zorder=1) - - # Statistical significance # - - sig_levs = [0.95,1] - mpl.rcParams['hatch.linewidth'] = 0.2 - cyclic_sig, cyclic_lontmp = add_cyclic_point(sigs_out[i], coord=lons) - ax1.contourf(cyclic_lon, lats, cyclic_sig,transform=ccrs.PlateCarree(),colors='black',vmin=0.95,vmax=1,levels=sig_levs,hatches=['......','......'],alpha=0.0,zorder=2) - - # Overlay the climatology # - - cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) - cs = ax1.contour(cyclic_lon, lats, cyclic_clim,transform=ccrs.PlateCarree(),colors='k',linewidths=0.5,vmin=cmin,vmax=cmax,levels=clevs,extend='both',zorder=3) - - plt.rc('font',weight='normal') - plt.clabel(cs,cs.levels[:],inline=1,fmt='%1.0f',fontsize=4,colors='k',inline_spacing=1) - plt.rc('font',weight='normal') - - if i == 4: - ax2 = ax1.twinx() - yticks = [0,0.5,1.0] - ylabels = ['','',''] - ax2.set_yticks(yticks) - ax2.set_yticklabels(ylabels, fontsize=8, fontweight = 'normal') - ax2.set_ylabel('eqbo - wqbo',fontsize=10) - ax2.spines['top'].set_visible(False) - ax2.spines['right'].set_visible(False) - ax2.spines['bottom'].set_visible(False) - ax2.spines['left'].set_visible(False) - ax2.get_yaxis().set_ticks([]) - - # Add colorbar # + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) - cb_ax = fig.add_axes([0.35, 0.05, 0.30, 0.015]) - cbar = fig.colorbar(contourf, cax=cb_ax, ticks=ticks,orientation='horizontal') - cbar.ax.tick_params(labelsize=8, width=1) - cbar.ax.set_xticklabels(ticks,weight='normal') + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo (%s seasons)' % int(len(negative_indices)), fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + ######## + # wqbo # + ######## + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(1, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) + + # Plot style features # + + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + 
ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(wqbo_out[i], coord=lons) - plt.subplots_adjust(top=0.86,bottom=0.09,hspace=0.3,wspace=0.0,left=0.02,right=0.94) - - return fig, ax \ No newline at end of file + # Plot anomalies # + + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) + + # Overlay the climatology # + + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) + + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('wqbo (%s seasons)' % int(len(positive_indices)), fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + ############## + # Difference # + ############## + + ax1 = plt.subplot2grid(shape=(3, 5), loc=(2, cols[i]), projection=projection) + ax1.set_extent(axes, ccrs.PlateCarree()) + + # Plot style features # + + ax1.coastlines(linewidth=0.25) + theta = np.linspace(0, 2 * np.pi, 100) + center, radius = [0.5, 0.5], 0.5 + verts = np.vstack([np.sin(theta), np.cos(theta)]).T + circle = mpath.Path(verts * radius + center) + ax1.set_boundary(circle, transform=ax1.transAxes) + pos1 = ax1.get_position() + plt.title("%s" % titles[i], fontsize=10, fontweight='normal', y=0.98) + cyclic_z, cyclic_lon = add_cyclic_point(diff_out[i], coord=lons) + + # Plot anomalies # + + contourf = ax1.contourf(cyclic_lon, lats, cyclic_z, transform=ccrs.PlateCarree(), cmap=cmap1, vmin=vmin, + vmax=vmax, levels=vlevs, extend='both', zorder=1) + + # Statistical significance # + + sig_levs = [0.95, 1] + mpl.rcParams['hatch.linewidth'] = 0.2 + cyclic_sig, cyclic_lontmp = add_cyclic_point(sigs_out[i], coord=lons) + ax1.contourf(cyclic_lon, lats, cyclic_sig, transform=ccrs.PlateCarree(), colors='black', vmin=0.95, vmax=1, + levels=sig_levs, hatches=['......', '......'], alpha=0.0, zorder=2) + + # Overlay the climatology # + + cyclic_clim, cyclic_lon = add_cyclic_point(clim_out[i], coord=lons) + cs = ax1.contour(cyclic_lon, lats, cyclic_clim, transform=ccrs.PlateCarree(), colors='k', linewidths=0.5, + vmin=cmin, vmax=cmax, levels=clevs, extend='both', zorder=3) + + plt.rc('font', weight='normal') + plt.clabel(cs, cs.levels[:], inline=1, fmt='%1.0f', fontsize=4, colors='k', inline_spacing=1) + plt.rc('font', weight='normal') + + if i == 4: + ax2 = ax1.twinx() + yticks = [0, 0.5, 1.0] + ylabels = ['', '', ''] + ax2.set_yticks(yticks) + ax2.set_yticklabels(ylabels, fontsize=8, fontweight='normal') + ax2.set_ylabel('eqbo - wqbo', fontsize=10) + ax2.spines['top'].set_visible(False) + ax2.spines['right'].set_visible(False) + ax2.spines['bottom'].set_visible(False) + ax2.spines['left'].set_visible(False) + ax2.get_yaxis().set_ticks([]) + + # Add colorbar # + + cb_ax = fig.add_axes([0.35, 0.05, 0.30, 0.015]) + cbar = fig.colorbar(contourf, 
cax=cb_ax, ticks=ticks, orientation='horizontal') + cbar.ax.tick_params(labelsize=8, width=1) + cbar.ax.set_xticklabels(ticks, weight='normal') + + plt.subplots_adjust(top=0.86, bottom=0.09, hspace=0.3, wspace=0.0, left=0.02, right=0.94) + + return fig, ax diff --git a/diagnostics/stc_spv_extremes/stc_spv_extremes.html b/diagnostics/stc_spv_extremes/stc_spv_extremes.html index 88cdd0d29..af8954712 100644 --- a/diagnostics/stc_spv_extremes/stc_spv_extremes.html +++ b/diagnostics/stc_spv_extremes/stc_spv_extremes.html @@ -43,7 +43,7 @@

Relationship of extreme stratospheric events to tropospheric and surface conditions

{{CASENAME}}

-Frequency of SSWs and VIs by month of occurrence +<font color=navy>Frequency of SSWs and VIs by month of occurrence
Model @@ -63,7 +63,7 @@

{{CASENAME}}

-Vertical coupling (dripping paint) composite plots +<font color=navy>Vertical coupling (dripping paint) composite plots
Model @@ -83,7 +83,7 @@

{{CASENAME}}

-Troposphere and surface impacts before and after SSWs and VIs +<font color=navy>Troposphere and surface impacts before and after SSWs and VIs
Model @@ -100,4 +100,3 @@

{{CASENAME}}

SH Reanalysis Maps
- diff --git a/diagnostics/stc_spv_extremes/stc_spv_extremes.py b/diagnostics/stc_spv_extremes/stc_spv_extremes.py index 12b548262..6f7404759 100644 --- a/diagnostics/stc_spv_extremes/stc_spv_extremes.py +++ b/diagnostics/stc_spv_extremes/stc_spv_extremes.py @@ -79,9 +79,6 @@ from cartopy.util import add_cyclic_point from matplotlib import pyplot as plt import statsmodels.api as sm - -from datetime import datetime,timedelta - from stc_spv_extremes_defs import lat_avg import stc_spv_extremes_defs @@ -93,8 +90,8 @@ mpl.rcParams['font.size'] = 12 mpl.rcParams['hatch.color']='gray' +# Plotting Functions *************************************** -#*********** Plotting Functions *************************************** def plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi): r""" Calculate SSW and VI central dates, save them to a text file, and plot @@ -136,11 +133,11 @@ def plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi): ssw = None vi = None - if (hemi == 'NH'): + if hemi == 'NH': # Need SSW central dates uzm_spec = uzm_10.interp(lat=60) - ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec,hem=hemi) + ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec, hem=hemi) # Determine SSW frequency (in NH, the total years are one less than in # the full uzm_10 data, because each winter straddles two years) @@ -150,25 +147,33 @@ for item in ssw: file_handler.write(f"{item}\n") file_handler.write(f"Frequency for {FIRSTYR+1}-{LASTYR}: {tot_freq_ssw:.2f}") - - #Determine seasonality of frequency + + # Determine seasonality of frequency + seas_ssw = None if ssw: - nov_freq = len([i for i in ssw if i.split("-")[1]=='11'])/(len(ssw)) - dec_freq = len([i for i in ssw if i.split("-")[1]=='12'])/(len(ssw)) - jan_freq = len([i for i in ssw if i.split("-")[1]=='01'])/(len(ssw)) - feb_freq = len([i for i in ssw if i.split("-")[1]=='02'])/(len(ssw)) - mar_freq = len([i for i in ssw if i.split("-")[1]=='03'])/(len(ssw)) + nov_freq = len([i for i in ssw if i.split("-")[1] == '11'])/(len(ssw)) + dec_freq = len([i for i in ssw if i.split("-")[1] == '12'])/(len(ssw)) + jan_freq = len([i for i in ssw if i.split("-")[1] == '01'])/(len(ssw)) + feb_freq = len([i for i in ssw if i.split("-")[1] == '02'])/(len(ssw)) + mar_freq = len([i for i in ssw if i.split("-")[1] == '03'])/(len(ssw)) seas_ssw = np.array([nov_freq, dec_freq, jan_freq, feb_freq, mar_freq]) - ci_snov_lo, ci_snov_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='11']), len(ssw), alpha=0.05, method='wilson') - ci_sdec_lo, ci_sdec_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='12']), len(ssw), alpha=0.05, method='wilson') - ci_sjan_lo, ci_sjan_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='01']), len(ssw), alpha=0.05, method='wilson') - ci_sfeb_lo, ci_sfeb_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='02']), len(ssw), alpha=0.05, method='wilson') - ci_smar_lo, ci_smar_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='03']), len(ssw), alpha=0.05, method='wilson') - - ci_ssw = [(seas_ssw[0]-ci_snov_lo, ci_snov_up-seas_ssw[0]), (seas_ssw[1]-ci_sdec_lo, ci_sdec_up-seas_ssw[1]), - (seas_ssw[2]-ci_sjan_lo, ci_sjan_up-seas_ssw[2]), (seas_ssw[3]-ci_sfeb_lo, ci_sfeb_up-seas_ssw[3]), + ci_snov_lo, ci_snov_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '11']), + len(ssw), alpha=0.05, method='wilson') + ci_sdec_lo, ci_sdec_up = sm.stats.proportion_confint(len([i 
for i in ssw if i.split("-")[1] == '12']), + len(ssw), alpha=0.05, method='wilson') + ci_sjan_lo, ci_sjan_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '01']), + len(ssw), alpha=0.05, method='wilson') + ci_sfeb_lo, ci_sfeb_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '02']), + len(ssw), alpha=0.05, method='wilson') + ci_smar_lo, ci_smar_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '03']), + len(ssw), alpha=0.05, method='wilson') + + ci_ssw = [(seas_ssw[0]-ci_snov_lo, ci_snov_up-seas_ssw[0]), (seas_ssw[1]-ci_sdec_lo, + ci_sdec_up-seas_ssw[1]), + (seas_ssw[2]-ci_sjan_lo, ci_sjan_up-seas_ssw[2]), (seas_ssw[3]-ci_sfeb_lo, + ci_sfeb_up-seas_ssw[3]), (seas_ssw[4]-ci_smar_lo, ci_smar_up-seas_ssw[4])] # Need VI central dates @@ -183,26 +188,31 @@ def plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi): file_handler.write(f"{item}\n") file_handler.write(f"Frequency for {FIRSTYR+1}-{LASTYR}: {tot_freq_vi:.2f}") - #Determine seasonality of frequency + # Determine seasonality of frequency seas_vi = None if vi: - nov_freq = len([i for i in vi if i.split("-")[1]=='11'])/(len(vi)) - dec_freq = len([i for i in vi if i.split("-")[1]=='12'])/(len(vi)) - jan_freq = len([i for i in vi if i.split("-")[1]=='01'])/(len(vi)) - feb_freq = len([i for i in vi if i.split("-")[1]=='02'])/(len(vi)) - mar_freq = len([i for i in vi if i.split("-")[1]=='03'])/(len(vi)) + nov_freq = len([i for i in vi if i.split("-")[1] == '11'])/(len(vi)) + dec_freq = len([i for i in vi if i.split("-")[1] == '12'])/(len(vi)) + jan_freq = len([i for i in vi if i.split("-")[1] == '01'])/(len(vi)) + feb_freq = len([i for i in vi if i.split("-")[1] == '02'])/(len(vi)) + mar_freq = len([i for i in vi if i.split("-")[1] == '03'])/(len(vi)) seas_vi = np.array([nov_freq, dec_freq, jan_freq, feb_freq, mar_freq]) - ci_vnov_lo, ci_vnov_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='11']), len(vi), alpha=0.05, method='wilson') - ci_vdec_lo, ci_vdec_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='12']), len(vi), alpha=0.05, method='wilson') - ci_vjan_lo, ci_vjan_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='01']), len(vi), alpha=0.05, method='wilson') - ci_vfeb_lo, ci_vfeb_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='02']), len(vi), alpha=0.05, method='wilson') - ci_vmar_lo, ci_vmar_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='03']), len(vi), alpha=0.05, method='wilson') + ci_vnov_lo, ci_vnov_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '11']), + len(vi), alpha=0.05, method='wilson') + ci_vdec_lo, ci_vdec_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '12']), + len(vi), alpha=0.05, method='wilson') + ci_vjan_lo, ci_vjan_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '01']), + len(vi), alpha=0.05, method='wilson') + ci_vfeb_lo, ci_vfeb_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '02']), + len(vi), alpha=0.05, method='wilson') + ci_vmar_lo, ci_vmar_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '03']), + len(vi), alpha=0.05, method='wilson') ci_vi = [(seas_vi[0]-ci_vnov_lo, ci_vnov_up-seas_vi[0]), (seas_vi[1]-ci_vdec_lo, ci_vdec_up-seas_vi[1]), (seas_vi[2]-ci_vjan_lo, ci_vjan_up-seas_vi[2]), (seas_vi[3]-ci_vfeb_lo, ci_vfeb_up-seas_vi[3]), (seas_vi[4]-ci_vmar_lo, ci_vmar_up-seas_vi[4])] - if 
(hemi == 'SH'): + if hemi == 'SH': # Need SSW central dates uzm_spec = uzm_10.interp(lat=-60) @@ -216,27 +226,34 @@ def plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi): file_handler.write(f"{item}\n") file_handler.write(f"Frequency for {FIRSTYR}-{LASTYR}: {tot_freq_ssw:.2f}") - #Determine seasonality of frequency + # Determine seasonality of frequency seas_ssw = None if ssw: - jun_freq = len([i for i in ssw if i.split("-")[1]=='06'])/(len(ssw)) - jul_freq = len([i for i in ssw if i.split("-")[1]=='07'])/(len(ssw)) - aug_freq = len([i for i in ssw if i.split("-")[1]=='08'])/(len(ssw)) - sep_freq = len([i for i in ssw if i.split("-")[1]=='09'])/(len(ssw)) - oct_freq = len([i for i in ssw if i.split("-")[1]=='10'])/(len(ssw)) + jun_freq = len([i for i in ssw if i.split("-")[1] == '06'])/(len(ssw)) + jul_freq = len([i for i in ssw if i.split("-")[1] == '07'])/(len(ssw)) + aug_freq = len([i for i in ssw if i.split("-")[1] == '08'])/(len(ssw)) + sep_freq = len([i for i in ssw if i.split("-")[1] == '09'])/(len(ssw)) + oct_freq = len([i for i in ssw if i.split("-")[1] == '10'])/(len(ssw)) seas_ssw = np.array([jun_freq, jul_freq, aug_freq, sep_freq, oct_freq]) - ci_sjun_lo, ci_sjun_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='06']), len(ssw), alpha=0.05, method='wilson') - ci_sjul_lo, ci_sjul_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='07']), len(ssw), alpha=0.05, method='wilson') - ci_saug_lo, ci_saug_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='08']), len(ssw), alpha=0.05, method='wilson') - ci_ssep_lo, ci_ssep_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='09']), len(ssw), alpha=0.05, method='wilson') - ci_soct_lo, ci_soct_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1]=='10']), len(ssw), alpha=0.05, method='wilson') - ci_ssw = [(seas_ssw[0]-ci_sjun_lo, ci_sjun_up-seas_ssw[0]), (seas_ssw[1]-ci_sjul_lo, ci_sjul_up-seas_ssw[1]), - (seas_ssw[2]-ci_saug_lo, ci_saug_up-seas_ssw[2]), (seas_ssw[3]-ci_ssep_lo, ci_ssep_up-seas_ssw[3]), + ci_sjun_lo, ci_sjun_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '06']), + len(ssw), alpha=0.05, method='wilson') + ci_sjul_lo, ci_sjul_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '07']), + len(ssw), alpha=0.05, method='wilson') + ci_saug_lo, ci_saug_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '08']), + len(ssw), alpha=0.05, method='wilson') + ci_ssep_lo, ci_ssep_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '09']), + len(ssw), alpha=0.05, method='wilson') + ci_soct_lo, ci_soct_up = sm.stats.proportion_confint(len([i for i in ssw if i.split("-")[1] == '10']), + len(ssw), alpha=0.05, method='wilson') + ci_ssw = [(seas_ssw[0]-ci_sjun_lo, ci_sjun_up-seas_ssw[0]), (seas_ssw[1]-ci_sjul_lo, + ci_sjul_up-seas_ssw[1]), + (seas_ssw[2]-ci_saug_lo, ci_saug_up-seas_ssw[2]), (seas_ssw[3]-ci_ssep_lo, + ci_ssep_up-seas_ssw[3]), (seas_ssw[4]-ci_soct_lo, ci_soct_up-seas_ssw[4])] - # Need VI central dates - vi = stc_spv_extremes_defs.spv_vi(uzm_spec,hem=hemi) + # Need VI central dates + vi = stc_spv_extremes_defs.spv_vi(uzm_spec, hem=hemi) # Determine VI frequency tot_freq_vi = len(vi)/len(yr) @@ -246,72 +263,78 @@ def plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi): file_handler.write(f"{item}\n") file_handler.write(f"Frequency for {FIRSTYR}-{LASTYR}: {tot_freq_vi:.2f}") - #Determine seasonality of frequency + # 
Determine seasonality of frequency seas_vi = None if vi: - jun_freq = len([i for i in vi if i.split("-")[1]=='06'])/(len(vi)) - jul_freq = len([i for i in vi if i.split("-")[1]=='07'])/(len(vi)) - aug_freq = len([i for i in vi if i.split("-")[1]=='08'])/(len(vi)) - sep_freq = len([i for i in vi if i.split("-")[1]=='09'])/(len(vi)) - oct_freq = len([i for i in vi if i.split("-")[1]=='10'])/(len(vi)) + jun_freq = len([i for i in vi if i.split("-")[1] == '06'])/(len(vi)) + jul_freq = len([i for i in vi if i.split("-")[1] == '07'])/(len(vi)) + aug_freq = len([i for i in vi if i.split("-")[1] == '08'])/(len(vi)) + sep_freq = len([i for i in vi if i.split("-")[1] == '09'])/(len(vi)) + oct_freq = len([i for i in vi if i.split("-")[1] == '10'])/(len(vi)) seas_vi = np.array([jun_freq, jul_freq, aug_freq, sep_freq, oct_freq]) - ci_vjun_lo, ci_vjun_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='06']), len(vi), alpha=0.05, method='wilson') - ci_vjul_lo, ci_vjul_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='07']), len(vi), alpha=0.05, method='wilson') - ci_vaug_lo, ci_vaug_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='08']), len(vi), alpha=0.05, method='wilson') - ci_vsep_lo, ci_vsep_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='09']), len(vi), alpha=0.05, method='wilson') - ci_voct_lo, ci_voct_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1]=='10']), len(vi), alpha=0.05, method='wilson') + ci_vjun_lo, ci_vjun_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '06']), + len(vi), alpha=0.05, method='wilson') + ci_vjul_lo, ci_vjul_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '07']), + len(vi), alpha=0.05, method='wilson') + ci_vaug_lo, ci_vaug_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '08']), + len(vi), alpha=0.05, method='wilson') + ci_vsep_lo, ci_vsep_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '09']), + len(vi), alpha=0.05, method='wilson') + ci_voct_lo, ci_voct_up = sm.stats.proportion_confint(len([i for i in vi if i.split("-")[1] == '10']), + len(vi), alpha=0.05, method='wilson') ci_vi = [(seas_vi[0]-ci_vjun_lo, ci_vjun_up-seas_vi[0]), (seas_vi[1]-ci_vjul_lo, ci_vjul_up-seas_vi[1]), (seas_vi[2]-ci_vaug_lo, ci_vaug_up-seas_vi[2]), (seas_vi[3]-ci_vsep_lo, ci_vsep_up-seas_vi[3]), (seas_vi[4]-ci_voct_lo, ci_voct_up-seas_vi[4])] - fig, ax = plt.subplots(1,2, figsize=(10, 5)) + fig, ax = plt.subplots(1, 2, figsize=(10, 5)) xlab_str = f"Month" ylab_str1 = f"Fractional occurrence per month" # Set plot limits, add labels, and make axis square - ax[0].set_xlim(0,6) - ax[0].set_ylim(0,1) + ax[0].set_xlim(0, 6) + ax[0].set_ylim(0, 1) ax[0].set_xlabel(xlab_str, fontsize=14) ax[0].set_ylabel(ylab_str1, fontsize=14) - ax[0].set_xticks([1,2,3,4,5]) + ax[0].set_xticks([1, 2, 3, 4, 5]) - if (hemi == 'NH'): - ax[0].set_xticklabels(['Nov','Dec','Jan','Feb','Mar']) - elif (hemi == 'SH'): - ax[0].set_xticklabels(['Jun','Jul','Aug','Sep','Oct']) + if hemi == 'NH': + ax[0].set_xticklabels(['Nov', 'Dec', 'Jan', 'Feb', 'Mar']) + elif hemi == 'SH': + ax[0].set_xticklabels(['Jun', 'Jul', 'Aug', 'Sep', 'Oct']) if ssw: - ax[0].bar([1,2,3,4,5],seas_ssw,color='k',edgecolor='k', yerr=np.array(ci_ssw).T,ecolor='grey') - ax[0].text(.5,.9,f'Total SSW Freq/yr = {tot_freq_ssw:.2f}') + ax[0].bar([1, 2, 3, 4, 5], seas_ssw, color='k', edgecolor='k', yerr=np.array(ci_ssw).T, ecolor='grey') + 
ax[0].text(.5, .9, f'Total SSW Freq/yr = {tot_freq_ssw:.2f}') else: - ax[0].text(2,.5,'No SSWs detected') + ax[0].text(2, .5, 'No SSWs detected') - ax[0].set_title(f'Fraction of {hemi} SSW occurrence by month',fontsize=16) + ax[0].set_title(f'Fraction of {hemi} SSW occurrence by month', fontsize=16) # Set plot limits, add labels, and make axis square - ax[1].set_xlim(0,6) - ax[1].set_ylim(0,1) + ax[1].set_xlim(0, 6) + ax[1].set_ylim(0, 1) ax[1].set_xlabel(xlab_str, fontsize=14) ax[1].set_ylabel(ylab_str1, fontsize=14) - ax[1].set_xticks([1,2,3,4,5]) + ax[1].set_xticks([1, 2, 3, 4, 5]) - if (hemi == 'NH'): - ax[1].set_xticklabels(['Nov','Dec','Jan','Feb','Mar']) - elif (hemi == 'SH'): - ax[1].set_xticklabels(['Jun','Jul','Aug','Sep','Oct']) + if hemi == 'NH': + ax[1].set_xticklabels(['Nov', 'Dec', 'Jan', 'Feb', 'Mar']) + elif hemi == 'SH': + ax[1].set_xticklabels(['Jun', 'Jul', 'Aug', 'Sep', 'Oct']) if vi: - ax[1].bar([1,2,3,4,5],seas_vi,color='k',edgecolor='k', yerr=np.array(ci_vi).T,ecolor='grey') - ax[1].text(.5,.9,f'Total VI Freq/yr = {tot_freq_vi:.2f}') + ax[1].bar([1, 2, 3, 4, 5], seas_vi, color='k', edgecolor='k', yerr=np.array(ci_vi).T, ecolor='grey') + ax[1].text(.5, .9, f'Total VI Freq/yr = {tot_freq_vi:.2f}') else: ax[1].text(2,.5,'No VIs detected') - ax[1].set_title(f'Fraction of {hemi} VI occurrence by month',fontsize=16) + ax[1].set_title(f'Fraction of {hemi} VI occurrence by month', fontsize=16) fig.tight_layout() - return (fig,ax) + return fig, ax + def plot_dripping_paint(uzm_10, zg_pcap, hemi): @@ -350,29 +373,29 @@ def plot_dripping_paint(uzm_10, zg_pcap, hemi): """ - #This function standardizes the polar cap geopotential heights at each pressure - #level by the daily climatology + # This function standardizes the polar cap geopotential heights at each pressure + # level by the daily climatology std_anom = zg_pcap.groupby("time.dayofyear").map(stc_spv_extremes_defs.standardize) ssw = None vi = None avgssw = None avgvi = None - if (hemi == 'NH'): + if hemi == 'NH': # Need SSW and VI central dates uzm_spec = uzm_10.interp(lat=60) - ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec,hem=hemi) - vi = stc_spv_extremes_defs.spv_vi(uzm_spec,hem=hemi) + ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec, hem=hemi) + vi = stc_spv_extremes_defs.spv_vi(uzm_spec, hem=hemi) if ssw: yrs = np.array([i.split("-")[0] for i in ssw]).astype(int) mns = np.array([i.split("-")[1] for i in ssw]).astype(int) dys = np.array([i.split("-")[2] for i in ssw]).astype(int) - #composite across events + # composite across events avgssw = stc_spv_extremes_defs.composite(std_anom, yrs, mns, dys) - [a_mean,prob_ssw]=stc_spv_extremes_defs.ttest_1samp(avgssw,0.,dim="event") + [a_mean, prob_ssw] = stc_spv_extremes_defs.ttest_1samp(avgssw, 0., dim="event") else: print('No SSWs detected') @@ -383,26 +406,26 @@ def plot_dripping_paint(uzm_10, zg_pcap, hemi): dys = np.array([i.split("-")[2] for i in vi]).astype(int) avgvi = stc_spv_extremes_defs.composite(std_anom, yrs, mns, dys) - [a_mean,prob_vi]=stc_spv_extremes_defs.ttest_1samp(avgvi,0.,dim="event") + [a_mean, prob_vi] = stc_spv_extremes_defs.ttest_1samp(avgvi,0., dim="event") else: print('No VIs detected') - if (hemi == 'SH'): + if hemi == 'SH': # Need SSW central dates uzm_spec = uzm_10.interp(lat=-60) - ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec,hem=hemi) - vi = stc_spv_extremes_defs.spv_vi(uzm_spec,hem=hemi) + ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec, hem=hemi) + vi = stc_spv_extremes_defs.spv_vi(uzm_spec, hem=hemi) if ssw: yrs = np.array([i.split("-")[0] 
for i in ssw]).astype(int) mns = np.array([i.split("-")[1] for i in ssw]).astype(int) dys = np.array([i.split("-")[2] for i in ssw]).astype(int) - #composite across events + # composite across events avgssw = stc_spv_extremes_defs.composite(std_anom, yrs, mns, dys) - [a_mean,prob_ssw]=stc_spv_extremes_defs.ttest_1samp(avgssw,0.,dim="event") + [a_mean, prob_ssw] = stc_spv_extremes_defs.ttest_1samp(avgssw, 0., dim="event") else: print('No SSWs detected') @@ -413,26 +436,26 @@ dys = np.array([i.split("-")[2] for i in vi]).astype(int) avgvi = stc_spv_extremes_defs.composite(std_anom, yrs, mns, dys) - [a_mean,prob_vi]=stc_spv_extremes_defs.ttest_1samp(avgvi,0.,dim="event") + [a_mean, prob_vi] = stc_spv_extremes_defs.ttest_1samp(avgvi, 0., dim="event") else: print('No VIs detected') - import matplotlib.colors as colors - fig, ax = plt.subplots(2,1, figsize=(10, 8),constrained_layout=True) + + import matplotlib.colors as colors # keep this import: colors.CenteredNorm() is used below + fig, ax = plt.subplots(2, 1, figsize=(10, 8), constrained_layout=True) lev = np.linspace(-2, 2, 21) - cmap ='RdBu_r' + cmap = 'RdBu_r' press = zg_pcap.plev.values - lag = np.arange(-20,60,1) #lags are hard-coded here (and within stc_spv_extremes_defs.py) + lag = np.arange(-20, 60, 1) # lags are hard-coded here (and within stc_spv_extremes_defs.py) xlab_str = f"Lag [days]" ylab_str = f"Pressure [hPa]" # Set plot limits, add labels - ax[0].set_xlim(-20,60-1) - ax[0].set_ylim(10,1000) + ax[0].set_xlim(-20, 60-1) + ax[0].set_ylim(10, 1000) ax[0].set_yscale('log') ax[0].invert_yaxis() ax[0].set_xlabel(xlab_str, fontsize=14) @@ -444,16 +467,17 @@ mask = np.logical_and(prob_ssw > 0.05, prob_ssw < 0.95) ax[0].contourf(lag, press, avgssw.mean("event").transpose(), levels=lev, cmap=cmap, - norm=colors.CenteredNorm(),extend='both') - ax[0].contourf(lag,press, mask.transpose(),levels=[.1,1],hatches=['..'],colors='none') - ax[0].vlines(x=0,ymin=np.min(press),ymax=np.max(press),color='gray') - ax[0].set_title(f'Standardized polar cap geopotential height anomalies \n composited for {hemi} SSWs ({ct_ssw} events)',fontsize=16) + norm=colors.CenteredNorm(), extend='both') + ax[0].contourf(lag, press, mask.transpose(), levels=[.1, 1], hatches=['..'], colors='none') + ax[0].vlines(x=0, ymin=np.min(press), ymax=np.max(press), color='gray') + ax[0].set_title(f'Standardized polar cap geopotential height anomalies \n composited for {hemi} SSWs' + f' ({ct_ssw} events)', fontsize=16) else: ax[0].text(0,100, 'No SSWs detected') - ax[1].set_xlim(-20,60-1) - ax[1].set_ylim(10,1000) + ax[1].set_xlim(-20, 60-1) + ax[1].set_ylim(10, 1000) ax[1].set_yscale('log') ax[1].invert_yaxis() ax[1].set_xlabel(xlab_str, fontsize=14) @@ -465,16 +489,18 @@ mask = np.logical_and(prob_vi > 0.05, prob_vi < 0.95) m2=ax[1].contourf(lag, press, avgvi.mean("event").transpose(), levels=lev, cmap=cmap, - norm=colors.CenteredNorm(),extend='both') - ax[1].contourf(lag,press, mask.transpose(),levels=[.1,1],hatches=['..'],colors='none') - ax[1].vlines(x=0,ymin=np.min(press),ymax=np.max(press),color='gray') - ax[1].set_title(f'Composite polar cap geopotential height anomalies \n for {hemi} VIs ({ct_vi} 
events)', + fontsize=16) fig.colorbar(m2, ax=ax[:], ticks=lev[::2], orientation='vertical',label='[Std Dev]') else: - ax[1].text(0,100, 'No VIs detected') + ax[1].text(0, 100, 'No VIs detected') - return (fig,ax) + return fig, ax + def plot_composite_maps(uzm_10, zg_500, tas, hemi): @@ -513,19 +539,19 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): ts_ssw = None zg_vi = None ts_vi = None - if (hemi == 'NH'): + if hemi == 'NH': # Need SSW and VI central dates uzm_spec = uzm_10.interp(lat=60) - ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec,hem=hemi) - vi = stc_spv_extremes_defs.spv_vi(uzm_spec,hem=hemi) + ssw = stc_spv_extremes_defs.ssw_cp07(uzm_spec, hem=hemi) + vi = stc_spv_extremes_defs.spv_vi(uzm_spec, hem=hemi) if ssw: yrs = np.array([i.split("-")[0] for i in ssw]).astype(int) mns = np.array([i.split("-")[1] for i in ssw]).astype(int) dys = np.array([i.split("-")[2] for i in ssw]).astype(int) - #composite across events + # composite across events zg_ssw = stc_spv_extremes_defs.composite(zg_500, yrs, mns, dys, 30, 30) ts_ssw = stc_spv_extremes_defs.composite(tas, yrs, mns, dys, 30, 30) @@ -543,7 +569,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): else: print('No VIs detected') - if (hemi == 'SH'): + if hemi == 'SH': # Need SSW central dates uzm_spec = uzm_10.interp(lat=-60) @@ -556,7 +582,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): mns = np.array([i.split("-")[1] for i in ssw]).astype(int) dys = np.array([i.split("-")[2] for i in ssw]).astype(int) - #composite across events + # composite across events zg_ssw = stc_spv_extremes_defs.composite(zg_500, yrs, mns, dys, 30, 30) ts_ssw = stc_spv_extremes_defs.composite(tas, yrs, mns, dys, 30, 30) @@ -575,48 +601,44 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): else: print('No VIs detected') - from palettable.colorbrewer.diverging import RdBu_11 - cmap=RdBu_11.mpl_colormap.reversed() - minlat = [] maxlat = [] fig = [] ax = [] - if (hemi == 'NH'): - fig, ax = plt.subplots(2,2, figsize=(10, 10),subplot_kw={'projection': ccrs.NorthPolarStereo()}) + if hemi == 'NH': + fig, ax = plt.subplots(2, 2, figsize=(10, 10), subplot_kw={'projection': ccrs.NorthPolarStereo()}) minlat = 30 maxlat = 90 - if (hemi == 'SH'): - fig, ax = plt.subplots(2,2, figsize=(10, 10),subplot_kw={'projection': ccrs.SouthPolarStereo()}) + if hemi == 'SH': + fig, ax = plt.subplots(2, 2, figsize=(10, 10), subplot_kw={'projection': ccrs.SouthPolarStereo()}) minlat = -90 maxlat = -30 lev1 = np.linspace(-80, 80, 21) lev1 = np.delete(lev1, [10]) # delete zero level lev2 = np.linspace(-2, 2, 21) - cmap ='RdBu_r' + cmap = 'RdBu_r' - m=ax[0,0].coastlines(linewidth=0.2) - ax[0,0].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) + m = ax[0, 0].coastlines(linewidth=0.2) + ax[0, 0].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) theta = np.linspace(0, 2*np.pi, 100) center, radius = [0.5, 0.5], 0.5 verts = np.vstack([np.sin(theta), np.cos(theta)]).T circle = mpath.Path(verts * radius + center) - ax[0,0].set_boundary(circle, transform=ax[0,0].transAxes) - - ax[0,1].coastlines(linewidth=0.2) - ax[0,1].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) - ax[0,1].set_boundary(circle, transform=ax[0,1].transAxes) + ax[0, 0].set_boundary(circle, transform=ax[0, 0].transAxes) - ax[1,0].coastlines(linewidth=0.2) - ax[1,0].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) - ax[1,0].set_boundary(circle, transform=ax[1,0].transAxes) + ax[0, 1].coastlines(linewidth=0.2) + ax[0, 1].set_extent([-180, 180, minlat, maxlat], 
ccrs.PlateCarree()) + ax[0, 1].set_boundary(circle, transform=ax[0, 1].transAxes) - ax[1,0].coastlines(linewidth=0.2) - ax[1,0].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) - ax[1,0].set_boundary(circle, transform=ax[1,0].transAxes) + ax[1, 0].coastlines(linewidth=0.2) + ax[1, 0].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) + ax[1, 0].set_boundary(circle, transform=ax[1, 0].transAxes) + ax[1, 1].coastlines(linewidth=0.2) + ax[1, 1].set_extent([-180, 180, minlat, maxlat], ccrs.PlateCarree()) + ax[1, 1].set_boundary(circle, transform=ax[1, 1].transAxes) + if ssw: ct_ssw = len(ssw) @@ -631,29 +653,29 @@ cyclic_z, cyclic_lon = add_cyclic_point(ssw_z_before, coord=lon) cyclic_t, cyclic_lon = add_cyclic_point(ssw_t_before, coord=lon) - m=ax[0,0].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, - extend='both') - m1=ax[0,0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', - extend='both') - ax[0,0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', - linewidths=0.3, extend='both') - ax[0,0].set_title(f'30-day average prior to {hemi} SSWs \n ({ct_ssw} events)',fontsize=14) - ax[0,0].clabel(m1,m1.levels[::2],inline=False, inline_spacing=1, fontsize=12) + m = ax[0, 0].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, + extend='both') + m1 = ax[0, 0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', + extend='both') + ax[0, 0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', + linewidths=0.3, extend='both') + ax[0, 0].set_title(f'30-day average prior to {hemi} SSWs \n ({ct_ssw} events)', fontsize=14) + ax[0, 0].clabel(m1, m1.levels[::2], inline=False, inline_spacing=1, fontsize=12) cyclic_z, cyclic_lon = add_cyclic_point(ssw_z_after, coord=lon) cyclic_t, cyclic_lon = add_cyclic_point(ssw_t_after, coord=lon) - m=ax[0,1].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, - extend='both') - m1=ax[0,1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', - extend='both') - ax[0,1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', - linewidths=0.3, extend='both') - ax[0,1].set_title(f'30-day average after {hemi} SSWs \n ({ct_ssw} events)',fontsize=14) - ax[0,1].clabel(m1,m1.levels[::2],inline=False, inline_spacing=1, fontsize=12) + m = ax[0, 1].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, + extend='both') + m1 = ax[0, 1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', + extend='both') + ax[0, 1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', + linewidths=0.3, extend='both') + ax[0, 1].set_title(f'30-day average after {hemi} SSWs \n ({ct_ssw} events)', fontsize=14) + ax[0, 1].clabel(m1, m1.levels[::2], inline=False, inline_spacing=1, fontsize=12) else: - ax[0,0].text(0,90, 'No SSWs detected') - ax[0,1].text(0,90, 'No SSWs detected') + ax[0, 0].text(0, 90, 'No SSWs detected') + ax[0, 1].text(0, 90, 'No SSWs detected') if vi: @@ -661,62 +683,64 @@ lat = zg_vi.lat.values lon = zg_vi.lon.values - vi_z_before = zg_vi.sel(time=slice(-30,-1)).mean(["event","time"]) - vi_t_before = ts_vi.sel(time=slice(-30,-1)).mean(["event","time"]) + vi_z_before = 
zg_vi.sel(time=slice(-30, -1)).mean(["event", "time"]) + vi_t_before = ts_vi.sel(time=slice(-30, -1)).mean(["event", "time"]) - vi_z_after = zg_vi.sel(time=slice(0,29)).mean(["event","time"]) - vi_t_after = ts_vi.sel(time=slice(0,29)).mean(["event","time"]) + vi_z_after = zg_vi.sel(time=slice(0, 29)).mean(["event", "time"]) + vi_t_after = ts_vi.sel(time=slice(0, 29)).mean(["event", "time"]) cyclic_z, cyclic_lon = add_cyclic_point(vi_z_before, coord=lon) cyclic_t, cyclic_lon = add_cyclic_point(vi_t_before, coord=lon) - m=ax[1,0].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, - extend='both') - m1=ax[1,0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', - linewidths=2,extend='both') - ax[1,0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', - linewidths=0.3, extend='both') - ax[1,0].set_title(f'30-day average prior to {hemi} VIs \n ({ct_vi} events)',fontsize=14) - ax[1,0].clabel(m1,m1.levels[::2],inline=False, inline_spacing=1, fontsize=12) + m = ax[1, 0].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, + extend='both') + m1 = ax[1, 0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', + linewidths=2, extend='both') + ax[1, 0].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', + linewidths=0.3, extend='both') + ax[1, 0].set_title(f'30-day average prior to {hemi} VIs \n ({ct_vi} events)', fontsize=14) + ax[1, 0].clabel(m1, m1.levels[::2], inline=False, inline_spacing=1, fontsize=12) cyclic_z, cyclic_lon = add_cyclic_point(vi_z_after, coord=lon) cyclic_t, cyclic_lon = add_cyclic_point(vi_t_after, coord=lon) - m=ax[1,1].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, - extend='both') - m1=ax[1,1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', - linewidths=2,extend='both') - ax[1,1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', - linewidths=0.3, extend='both') - ax[1,1].set_title(f'30-day average after {hemi} VIs \n ({ct_vi} events)',fontsize=14) - ax[1,1].clabel(m1,m1.levels[::2],inline=False, inline_spacing=1, fontsize=12) + m = ax[1, 1].contourf(cyclic_lon, lat, cyclic_t, transform=ccrs.PlateCarree(), levels=lev2, cmap=cmap, + extend='both') + m1 = ax[1, 1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=lev1, colors='k', + linewidths=2, extend='both') + ax[1, 1].contour(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), levels=[0], colors='k', + linewidths=0.3, extend='both') + ax[1, 1].set_title(f'30-day average after {hemi} VIs \n ({ct_vi} events)', fontsize=14) + ax[1, 1].clabel(m1, m1.levels[::2], inline=False, inline_spacing=1, fontsize=12) else: - ax[1,0].text(0,90, 'No VIs detected') - ax[1,1].text(0,90, 'No VIs detected') + ax[1, 0].text(0, 90, 'No VIs detected') + ax[1, 1].text(0, 90, 'No VIs detected') plt.suptitle("Near-surface air temperature (shading)\n and 500 hPa geopotential height anomalies (contours)", - fontsize=16) + fontsize=16) fig.tight_layout() fig.subplots_adjust(right=0.8) cb_ax = fig.add_axes([0.85, 0.25, 0.03, 0.5]) - cbar = fig.colorbar(m, cax=cb_ax, ticks=lev2[::2],orientation='vertical',label='[degK]') + cbar = fig.colorbar(m, cax=cb_ax, ticks=lev2[::2], orientation='vertical', label='[degK]') cbar.ax.tick_params(labelsize=12, width=1) - return (fig,ax) + return fig, ax 
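# Illustrative sketch, not part of the patch: the mapping functions above all rely on
# two cartopy/matplotlib idioms, clipping a polar-stereographic panel to a circle and
# closing the dateline seam with add_cyclic_point before contouring. A minimal
# standalone version on a synthetic grid (the grid and field are assumptions):
import numpy as np
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point

lon = np.arange(0, 360, 2.5)
lat = np.arange(30, 91, 2.5)
z = np.cos(np.deg2rad(lat))[:, None] * np.sin(3 * np.deg2rad(lon))[None, :]  # fake anomalies

ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent([-180, 180, 30, 90], ccrs.PlateCarree())
ax.coastlines(linewidth=0.2)

theta = np.linspace(0, 2 * np.pi, 100)                 # clip the square frame to a circle
verts = np.vstack([np.sin(theta), np.cos(theta)]).T
ax.set_boundary(mpath.Path(verts * 0.5 + [0.5, 0.5]), transform=ax.transAxes)

cyclic_z, cyclic_lon = add_cyclic_point(z, coord=lon)  # avoid a gap at the dateline
ax.contourf(cyclic_lon, lat, cyclic_z, transform=ccrs.PlateCarree(), cmap='RdBu_r')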
########################################################################## # --- BEGIN SCRIPT --- # ########################################################################## + + print('\n=======================================') print('BEGIN stc_spv_extremes.py ') print('=======================================\n') -##### Parse MDTF-set environment variables +# Parse MDTF-set environment variables print('*** Parse MDTF-set environment variables ...') CASENAME = os.environ['CASENAME'] -FIRSTYR = int(os.environ['FIRSTYR']) -LASTYR = int(os.environ['LASTYR']) -WK_DIR = os.environ['WK_DIR'] +FIRSTYR = int(os.environ['startdate']) +LASTYR = int(os.environ['enddate']) +WK_DIR = os.environ['WORK_DIR'] OBS_DIR = os.environ['OBS_DATA'] ufi = os.environ['UA_FILE'] @@ -731,7 +755,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): # Do error-checking on these environment variables. Rather than trying to # correct the values, we throw errors so that users can adjust their config # files in the appropriate manner, and obtain expected results. -if (PCAP_LO_LAT < 30): +if PCAP_LO_LAT < 30: msg = 'PCAP_LO_LAT must be >= 30' raise ValueError(msg) @@ -750,20 +774,20 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): # Restrict to common time period (note, here we assume that all model variables are the same length in time) mod_firstyr = uazm.time.dt.year.values[0] mod_lastyr = uazm.time.dt.year.values[-1] -print(mod_firstyr,mod_lastyr) +print(mod_firstyr, mod_lastyr) print(f'***Limiting model data to {FIRSTYR} to {LASTYR}***') -if (FIRSTYR < mod_firstyr): - msg = 'FIRSTYR must be >= model first year' +if FIRSTYR < mod_firstyr: + msg = 'startdate must be >= model first year' raise ValueError(msg) -if (LASTYR > mod_lastyr): - msg = 'LASTYR must be <= model last year' +if LASTYR > mod_lastyr: + msg = 'enddate must be <= model last year' raise ValueError(msg) -uazms = uazm.sel(time=slice(str(FIRSTYR),str(LASTYR))) -zgzms = zgzm.sel(time=slice(str(FIRSTYR),str(LASTYR))) -zg500s = zg500.sel(time=slice(str(FIRSTYR),str(LASTYR))) -tass = tas.sel(time=slice(str(FIRSTYR),str(LASTYR))) +uazms = uazm.sel(time=slice(str(FIRSTYR), str(LASTYR))) +zgzms = zgzm.sel(time=slice(str(FIRSTYR), str(LASTYR))) +zg500s = zg500.sel(time=slice(str(FIRSTYR), str(LASTYR))) +tass = tas.sel(time=slice(str(FIRSTYR), str(LASTYR))) # Calendar types may vary across models. To address this, we convert the calendar to "standard" print(f' *** Calendar type of model is: '+uazms.time.attrs['calendar_type']) @@ -785,7 +809,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): tass = tass.convert_calendar('standard', align_on="year") print(f'***Determine whether model pressure levels are in Pa or hPa, convert to hPa') -if getattr(uazms.plev,'units') == 'Pa': +if getattr(uazms.plev, 'units') == 'Pa': print(f'**Converting pressure levels to hPa') uazms = uazms.assign_coords({"plev": (uazms.plev/100.)}) uazms.plev.attrs['units'] = 'hPa' @@ -806,7 +830,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): zgzm = zgzm.close() zg500 = zg500.close() -#This function removes the daily seasonal cycle. This step can take awhile. +# This function removes the daily seasonal cycle. This step can take awhile. 
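# Illustrative sketch, not part of the patch: the "remove the daily seasonal cycle"
# step referred to above is the standard xarray groupby('time.dayofyear') anomaly
# pattern, shown here on a synthetic daily series (the data are an assumption):
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', '2004-12-31', freq='D')
vals = 10 * np.sin(2 * np.pi * time.dayofyear / 365.25) + np.random.default_rng(0).normal(size=time.size)
da = xr.DataArray(vals, coords={'time': time}, dims='time')

# Subtract each calendar day's multi-year mean, leaving anomalies
anom = da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')
print(float(anom.groupby('time.dayofyear').mean('time').max()))  # ~0 by construction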
print('*** Computing anomalies of 500 hPa geopotential height and surface temperature') zg_anom = zg500s.groupby("time.dayofyear").map(stc_spv_extremes_defs.deseasonalize) ts_anom = tass.groupby("time.dayofyear").map(stc_spv_extremes_defs.deseasonalize) @@ -833,12 +857,12 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): print(f'*** Creating {hemi} downward coupling composites') drip_plot = f'{plot_dir}/{CASENAME}_{hemi}_SPV_Drip_Plot.png' - fig,ax = plot_dripping_paint(uzm_10, zg_pcap[hemi], hemi) + fig, ax = plot_dripping_paint(uzm_10, zg_pcap[hemi], hemi) fig.savefig(drip_plot) print(f'*** Creating {hemi} Z500 and tas composites') map_plot = f'{plot_dir}/{CASENAME}_{hemi}_SPV_Composite_Map.png' - fig,ax = plot_composite_maps(uzm_10, zg_500[hemi], tas[hemi], hemi) + fig, ax = plot_composite_maps(uzm_10, zg_500[hemi], tas[hemi], hemi) fig.savefig(map_plot) # Output data will have dimensions of [hemi, time, lev], where hemi @@ -848,17 +872,17 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): outfile = data_dir+f'/{CASENAME}_SPV-extremes_diagnostics.nc' # Prepare the output variables and their metadata -zg_pcap = xr.concat([zg_pcap['SH'], zg_pcap['NH']], dim='hemi') +zg_pcap = xr.concat([zg_pcap['SH'], zg_pcap['NH']], dim='hemi') zg_pcap.name = 'zg_pcap' zg_pcap.attrs['units'] = 'm' zg_pcap.attrs['long_name'] = f'{PCAP_LO_LAT}-90 polar cap geopotential heights' -zg_500 = xr.concat([zg_500['SH'], zg_500['NH']], dim='hemi') +zg_500 = xr.concat([zg_500['SH'], zg_500['NH']], dim='hemi') zg_500.name = 'zg_500' zg_500.attrs['units'] = 'm' zg_500.attrs['long_name'] = f'Daily geopotential height anomalies at 500 hPa' -tas = xr.concat([tas['SH'], tas['NH']], dim='hemi') +tas = xr.concat([tas['SH'], tas['NH']], dim='hemi') tas.name = 'tas' tas.attrs['units'] = 'K' tas.attrs['long_name'] = f'Daily surface air temperature anomalies' @@ -875,7 +899,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): print(f'*** Saving SPV-extremes diagnostics to {outfile}') out_ds.to_netcdf(outfile, encoding=encoding) -## Loading obs data files & plotting obs figures: ########################## +# Loading obs data files & plotting obs figures: ########################## print(f'*** Now working on obs data\n------------------------------') obs_file = OBS_DIR + '/stc_spv_extremes_obs-data.nc' @@ -888,10 +912,10 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): obs_lastyr = obs.time.dt.year.values[-1] print(f'***Limiting obs data to {FIRSTYR} to {LASTYR}***') - if (FIRSTYR < obs_firstyr): + if FIRSTYR < obs_firstyr: msg = 'FIRSTYR must be >= obs first year' raise ValueError(msg) - if (LASTYR > obs_lastyr): + if LASTYR > obs_lastyr: msg = 'LASTYR must be <= obs last year' raise ValueError(msg) @@ -905,7 +929,7 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): zg_pcap['NH'] = lat_avg(obs.zg_zm, PCAP_LO_LAT, 90) zg_pcap['SH'] = lat_avg(obs.zg_zm, -90, -PCAP_LO_LAT) - #This function removes the daily seasonal cycle + # This function removes the daily seasonal cycle print('*** Computing anomalies of 500 hPa geopotential height and surface temperature') zg_anom = obs.zg_500.groupby("time.dayofyear").map(stc_spv_extremes_defs.deseasonalize) ts_anom = obs.tas.groupby("time.dayofyear").map(stc_spv_extremes_defs.deseasonalize) @@ -926,21 +950,22 @@ def plot_composite_maps(uzm_10, zg_500, tas, hemi): freq_plot = f'{plot_dir}/obs_{hemi}_Freq_SPV_BarPlot.png' filepath_ssw = f'{WK_DIR}/obs/netCDF/{rean}_{hemi}_ssw.txt' filepath_vi = f'{WK_DIR}/obs/netCDF/{rean}_{hemi}_vi.txt' - fig,ax = plot_spv_hist(uzm_10, 
hemi, filepath_ssw, filepath_vi) + fig, ax = plot_spv_hist(uzm_10, hemi, filepath_ssw, filepath_vi) fig.savefig(freq_plot) print(f'*** Creating {hemi} downward coupling composites') drip_plot = f'{plot_dir}/obs_{hemi}_SPV_Drip_Plot.png' - fig,ax = plot_dripping_paint(uzm_10, zg_pcap[hemi], hemi) + fig, ax = plot_dripping_paint(uzm_10, zg_pcap[hemi], hemi) fig.savefig(drip_plot) print(f'*** Creating {hemi} Z500 and tas composites') map_plot = f'{plot_dir}/obs_{hemi}_SPV_Composite_Map.png' - fig,ax = plot_composite_maps(uzm_10, zg_500[hemi], tas[hemi], hemi) + fig, ax = plot_composite_maps(uzm_10, zg_500[hemi], tas[hemi], hemi) fig.savefig(map_plot) -except: +except Exception as exc: print('*** Unable to create plots from the observational data: ') + print(exc) print(traceback.format_exc()) print('\n=====================================') diff --git a/diagnostics/stc_spv_extremes/stc_spv_extremes_defs.py b/diagnostics/stc_spv_extremes/stc_spv_extremes_defs.py index a9598e0ad..02bee0806 100644 --- a/diagnostics/stc_spv_extremes/stc_spv_extremes_defs.py +++ b/diagnostics/stc_spv_extremes/stc_spv_extremes_defs.py @@ -1,4 +1,4 @@ -''' +""" This module contains functions used in the Stratospheric Polar Vortex Extremes POD. @@ -11,14 +11,14 @@ spv_vi (find central dates of VIs) composite (average pressure-time variable across events) ttest_1samp (one sample t-test) -''' +""" import numpy as np import xarray as xr -from datetime import datetime,timedelta from scipy import stats -#*********************************************************************************** +# *********************************************************************************** + def standardize(x): r""" Standardize a variable x by subtracting its mean and dividing by its standard @@ -35,7 +35,7 @@ def standardize(x): return (stand_x) -#*********************************************************************************** +# *********************************************************************************** def deseasonalize(x): r""" remove the daily seasonal cycle by subtracting its mean over the total time period. @@ -49,9 +49,10 @@ def deseasonalize(x): x_anom = (x - x.mean("time")) - return (x_anom) + return x_anom + +# ************************************************************************************ -#************************************************************************************ def lat_avg(ds, lat_lo, lat_hi): r""" Calculate a meridional average of data. The average is done using @@ -83,7 +84,7 @@ def lat_avg(ds, lat_lo, lat_hi): """ # Limit the latitude range without assuming the ordering of lats - ds_tmp = ds.isel(lat = np.logical_and(ds.lat >= lat_lo, ds.lat <= lat_hi)) + ds_tmp = ds.isel(lat=np.logical_and(ds.lat >= lat_lo, ds.lat <= lat_hi)) # Define the cos(lat) weights wgts = np.cos(np.deg2rad(ds_tmp.lat)) @@ -93,10 +94,10 @@ def lat_avg(ds, lat_lo, lat_hi): ds_wgt_avg = ds_tmp.weighted(wgts).mean('lat') return ds_wgt_avg -#***************************************************************************** +# ***************************************************************************** -def getConsecutiveValues(arr): - + +def getConsecutiveValues(arr): """ This calculates and groups consecutive values of an array of numbers, which must be in an ascending order. 
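The grouping behavior described in this docstring can be expressed compactly; below is a sketch equivalent in spirit to getConsecutiveValues (itertools-based, assuming the ascending input the docstring requires, and not the index-bookkeeping implementation that follows):

from itertools import groupby

def group_consecutive(arr):
    # Within a run of consecutive integers, value minus position is constant,
    # so it serves as a grouping key.
    return [[v for _, v in grp]
            for _, grp in groupby(enumerate(arr), key=lambda p: p[1] - p[0])]

print(group_consecutive([1, 2, 3, 7, 8, 12]))  # [[1, 2, 3], [7, 8], [12]]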
@@ -107,25 +108,26 @@ def getConsecutiveValues(arr): if len(arr) == 1: final.append(arr) else: - for i in range(1,len(arr)) : - if arr[i] - arr[i-1] == 1 : + for i in range(1, len(arr)): + if arr[i] - arr[i-1] == 1: end = i else : - if len(arr[start:end+1])==0: + if len(arr[start:end+1]) == 0: final.append(arr[start:start+1]) else: final.append(arr[start:end+1]) start = i - if i == len(arr) - 1 : - if len(arr[start:end+1])==0: + if i == len(arr) - 1: + if len(arr[start:end+1]) == 0: final.append(arr[start:start+1]) else: final.append(arr[start:end+1]) return final -#************************************************************************** +# ************************************************************************** -def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): + +def ssw_cp07(variable, threshold=0, consec_days=20, hem="NH"): """ This calculates central dates of sudden stratospheric warmings following the definition in @@ -154,7 +156,7 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): """ year = variable.time.dt.year.values - yr = np.arange(year[0],year[-1]+1,1) + yr = np.arange(year[0], year[-1]+1, 1) yr = yr.tolist() ssw_dates = [] @@ -163,25 +165,26 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): # look for mid-winter SSWs between Nov-Mar in the NH if hem == "NH": - s_str = str(y)+"-11-01" - e_str = str(y+1)+"-03-31" - #print("Calculating NH SSWs for "+s_str+" to "+e_str) - var = variable.sel(time=slice(s_str,e_str)) - var_chk = variable.sel(time=slice(s_str,str(y+1)+"-04-30")) #this variable enables check for final warming + s_str = str(y) + "-11-01" + e_str = str(y+1) + "-03-31" + # print("Calculating NH SSWs for "+s_str+" to "+e_str) + var = variable.sel(time=slice(s_str, e_str)) + # this variable enables check for final warming + var_chk = variable.sel(time=slice(s_str, str(y+1) + "-04-30")) if hem == "SH": - s_str = str(y)+"-06-01" - e_str = str(y)+"-10-31" - #print("Calculating SH SSWs for "+s_str+" to "+e_str) - var = variable.sel(time=slice(s_str,e_str)) - var_chk = variable.sel(time=slice(s_str,str(y)+"-11-30")) #this variable enables check for final warming + s_str = str(y) + "-06-01" + e_str = str(y) + "-10-31" + # print("Calculating SH SSWs for "+s_str+" to "+e_str) + var = variable.sel(time=slice(s_str, e_str)) + var_chk = variable.sel(time=slice(s_str, str(y)+"-11-30")) # this variable enables check for final warming var = var.assign_coords(dayofwinter=("time", np.arange(len(var.time.values)))) var_chk = var_chk.assign_coords(dayofwinter=("time", np.arange(len(var_chk.time.values)))) - #Find instances where U1060 is less than threshold - vor_neg = var.where(var < threshold,drop=True) + # Find instances where U1060 is less than threshold + vor_neg = var.where(var < threshold, drop=True) - #determine consecutive groups of easterlies + # determine consecutive groups of easterlies dayswitheasterlies = getConsecutiveValues(vor_neg.dayofwinter.values) # if there's only one group, check for final warming and if no final warming, append central date to ssw_dates # @@ -192,7 +195,7 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): # search over all winds between end of candidate central event and april 30th for 10 consecutive days of # westerlies. 
if 10 consecutive days of westerlies are found, append the central date to ssws # windsafterwinter = var_chk[lastvalue:] - westerlies = windsafterwinter.where(windsafterwinter > threshold,drop=True) + westerlies = windsafterwinter.where(windsafterwinter > threshold, drop=True) if len(westerlies) > 0: westerlygroups = getConsecutiveValues(westerlies.dayofwinter.values) @@ -219,7 +222,7 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): # search for multiple SSWs by looping over 'groups' # - for i,v in enumerate(dayswitheasterlies): + for i, v in enumerate(dayswitheasterlies): # "break" statement used b/c the loop always considers a group[i] and the next group[i+1], # # so the loop must be exited on the the 2nd to last index # @@ -229,7 +232,6 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): # Get the first/last index from the current group currentgroup = dayswitheasterlies[int(i)] - first_currentgroup = currentgroup[0] last_currentgroup = currentgroup[-1] # Get the first index from the next (current+1) group @@ -238,7 +240,8 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): # If the groups are separated by "consec_days" of westerlies, check for final warming # if first_nextgroup - last_currentgroup > consec_days: - # search over all winds between candidate central date and april 30th for 10 consecutive days of westerlies # + # search over all winds between candidate central date and april 30th for 10 consecutive + # days of westerlies # # if 10 consecutive days of westerlies are found, append the central date to ssw_dates # windsafterwinter = var_chk[first_nextgroup:] westerlies = windsafterwinter.where(windsafterwinter > threshold, drop=True) @@ -251,9 +254,10 @@ def ssw_cp07(variable,threshold=0, consec_days=20, hem="NH"): return ssw_dates -#************************************************************************** +# ************************************************************************** + -def spv_vi(variable, thresh = 0.8, persist=10, consec_days=20, hem="NH"): +def spv_vi(variable, thresh=0.8, persist=10, consec_days=20, hem="NH"): """ This calculates central dates of polar vortex intensifications (VIs), @@ -292,7 +296,7 @@ def spv_vi(variable, thresh = 0.8, persist=10, consec_days=20, hem="NH"): vi_dates = [] month_day_str = xr.DataArray(variable.indexes['time'].strftime('%m-%d'), coords=variable.coords, - name='month_day_str') + name='month_day_str') daily_thresh = variable.groupby(month_day_str).quantile(thresh,dim='time') for y in yr: @@ -300,14 +304,13 @@ def spv_vi(variable, thresh = 0.8, persist=10, consec_days=20, hem="NH"): # look for mid-winter VIs between Nov-Mar in the NH, June-Oct in the SH if hem == "NH": # uses a fixed threshold representing the "thresh"*100 percentile of the NDJFM values - #new_thr = variable.sel(time=variable.time.dt.month.isin([1,2,3,11,12])).quantile(thresh, dim='time') if y == yr[-1]: break else: s_str = str(y)+"-11-01" e_str = str(y+1)+"-03-31" - #print("Calculating NH VIs for "+s_str+" to "+e_str) + # print("Calculating NH VIs for "+s_str+" to "+e_str) var = variable.sel(time=slice(s_str,e_str)) var_chk = variable.sel(time=slice(s_str,str(y+1)+"-04-30")) var_th = xr.concat([daily_thresh.sel(month_day_str=slice('11-01','12-31')), @@ -318,37 +321,34 @@ def spv_vi(variable, thresh = 0.8, persist=10, consec_days=20, hem="NH"): var_th = var_th.where(~(var_th.month_day_str == '02-29'),drop=True) if hem == "SH": - s_str = str(y)+"-06-01" - e_str = str(y)+"-10-31" - #print("Calculating SH VIs for "+s_str+" 
to "+e_str) - var = variable.sel(time=slice(s_str,e_str)) - var_chk = variable.sel(time=slice(s_str,str(y)+"-11-30")) + s_str = str(y) + "-06-01" + e_str = str(y) + "-10-31" + # print("Calculating SH VIs for "+s_str+" to "+e_str) + var = variable.sel(time=slice(s_str, e_str)) + var_chk = variable.sel(time=slice(s_str, str(y)+"-11-30")) var_th = daily_thresh.sel(month_day_str=slice('06-01','10-31')) var = var.assign_coords(dayofwinter=("time", np.arange(len(var.time.values)))) var_chk = var_chk.assign_coords(dayofwinter=("time", np.arange(len(var_chk.time.values)))) new_thr = xr.DataArray(var_th.values, dims={'time': np.arange(len(var.time.values))}) - #Find instances where U1060 is greater than threshold + # Find instances where U1060 is greater than threshold vor_int = var.where(var > new_thr,drop=True) - #determine consecutive groups of days above threshold + # determine consecutive groups of days above threshold daysabovethreshold = getConsecutiveValues(vor_int.dayofwinter.values) - - # if there's only one group, check that winds are sustained for consec_days and append central date to vi_dates # + # if there's only one group, check that winds are sustained for consec_days and append central date to vi_dates if len(daysabovethreshold) == 1: firstvalue = daysabovethreshold[0][0] lastvalue = daysabovethreshold[0][-1] if (lastvalue - firstvalue) > persist-1: vi_dates.append(var.dayofwinter[firstvalue].time.dt.strftime("%Y-%m-%d").values.tolist()) - - if len(daysabovethreshold) > 1: # if there are multiple 'groups': - - # search for multiple VIs by looping over 'groups' # + if len(daysabovethreshold) > 1: # if there are multiple 'groups': + # search for multiple VIs by looping over 'groups' # last_date = np.array([]) - for i,v in enumerate(daysabovethreshold): + for i, v in enumerate(daysabovethreshold): # Get the first/last index from the current group currentgroup = daysabovethreshold[int(i)] @@ -357,22 +357,23 @@ def spv_vi(variable, thresh = 0.8, persist=10, consec_days=20, hem="NH"): if (last_currentgroup - first_currentgroup) > persist-1: - if i==0 or last_date.size==0 : #on first iteration/event, no separation check needed - vi_dates.append(var.dayofwinter[first_currentgroup].time.dt.strftime("%Y-%m-%d").values.tolist()) - last_date = last_currentgroup #this sets the last_date as the last date of a valid event + if i == 0 or last_date.size == 0: # on first iteration/event, no separation check needed + vi_dates.append( + var.dayofwinter[first_currentgroup].time.dt.strftime("%Y-%m-%d").values.tolist()) + last_date = last_currentgroup # this sets the last_date as the last date of a valid event else: # Get the last index from the previous (current-1) group - oldgroup = daysabovethreshold[int(i-1)] - first_oldgroup = oldgroup[0] - last_oldgroup = oldgroup[-1] if (first_currentgroup - last_date) > consec_days-1: - vi_dates.append(var.dayofwinter[first_currentgroup].time.dt.strftime("%Y-%m-%d").values.tolist()) - last_date = last_currentgroup #this sets the last_date as the last date of a valid event - + vi_dates.append( + var.dayofwinter[first_currentgroup].time.dt.strftime("%Y-%m-%d").values.tolist()) + # this sets the last_date as the last date of a valid event + last_date = last_currentgroup + return vi_dates -#************************************************************************** +# ************************************************************************** + def composite(variable, yre, mne, dye, lag_before=20, lag_after=60): @@ -409,25 +410,25 @@ def composite(variable, yre, mne, 
dye, lag_before=20, lag_after=60): from datetime import datetime,timedelta - #initialize with first event + # initialize with first event count = np.arange(len(yre)) - cen = datetime(year=yre[0],day=dye[0],month=mne[0]) + cen = datetime(year=yre[0], day=dye[0], month=mne[0]) en = cen + timedelta(days=lag_after-1) sta = cen - timedelta(days=lag_before) - lag = np.arange(-lag_before,lag_after,1) + lag = np.arange(-lag_before, lag_after, 1) edate = en.strftime("%Y-%m-%d") stdate = sta.strftime("%Y-%m-%d") - avgvar = variable.sel(time=slice(stdate,edate)) + avgvar = variable.sel(time=slice(stdate, edate)) avgvar = avgvar.assign_coords(time=lag) avgvar = avgvar.expand_dims(dim="event") for dat in count[1:]: - cen = datetime(year=yre[dat],day=dye[dat],month=mne[dat]) + cen = datetime(year=yre[dat], day=dye[dat], month=mne[dat]) en = cen + timedelta(days=lag_after-1) sta = cen - timedelta(days=lag_before) edate = en.strftime("%Y-%m-%d") stdate = sta.strftime("%Y-%m-%d") - newvar = variable.sel(time=slice(stdate,edate)) + newvar = variable.sel(time=slice(stdate, edate)) newvar = newvar.assign_coords(time=lag) newvar = newvar.expand_dims(dim="event") @@ -436,8 +437,8 @@ def composite(variable, yre, mne, dye, lag_before=20, lag_after=60): return avgvar +# **************************************************************************** -#**************************************************************************** def ttest_1samp(a, popmean, dim): """ @@ -471,10 +472,9 @@ def ttest_1samp(a, popmean, dim): v = a.var(dim, ddof=1) denom = np.sqrt(v / float(n)) - t = d /denom + t = d / denom prob = stats.distributions.t.sf(np.fabs(t), df) * 2 prob_xa = xr.DataArray(prob, coords=a_mean.coords) return a_mean, prob_xa -#************************************************************************************** - +# ************************************************************************************** diff --git a/diagnostics/stc_vert_wave_coupling/settings.jsonc b/diagnostics/stc_vert_wave_coupling/settings.jsonc index ca2bcddae..0a91e2c7d 100644 --- a/diagnostics/stc_vert_wave_coupling/settings.jsonc +++ b/diagnostics/stc_vert_wave_coupling/settings.jsonc @@ -7,7 +7,7 @@ "settings" : { "driver" : "stc_vert_wave_coupling.py", "long_name" : "Seasonality and Extremes of Vertically Propagating Planetary Waves", - "realm" : "atmos", + "convention" : "cmip", "description" : "Assess the representation of planetary wave coupling between the troposphere and stratosphere", "pod_env_vars" : { // The first and last years of the obs data to use. 
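The next hunk fleshes out the lat/lon dimension entries with units and axis metadata; for orientation, a hypothetical sketch of how such attributes appear on an xarray coordinate once a dataset conforming to this settings file is loaded (illustrative only, not framework code):

import numpy as np
import xarray as xr

lat = xr.DataArray(np.linspace(-90.0, 90.0, 73), dims="lat",
                   attrs={"standard_name": "latitude",
                          "units": "degrees_north",
                          "axis": "Y"})
print(lat.attrs["units"], lat.attrs["axis"])  # degrees_north Y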
@@ -33,8 +33,16 @@ } }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "lev": { "standard_name": "air_pressure", "units": "hPa", @@ -46,6 +54,7 @@ "varlist": { "va50": { "standard_name": "northward_wind", + "realm": "atmos", "units": "m s-1", "frequency": "day", "dimensions": ["time", "lat", "lon"], @@ -54,6 +63,7 @@ }, "ta50": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "frequency": "day", "dimensions": ["time", "lat", "lon"], @@ -62,6 +72,7 @@ }, "zg10": { "standard_name" : "geopotential_height", + "realm": "atmos", "units" : "m", "frequency": "day", "dimensions": ["time", "lat", "lon"], @@ -69,6 +80,7 @@ }, "zg500": { "standard_name" : "geopotential_height", + "realm": "atmos", "units" : "m", "frequency": "day", "dimensions": ["time", "lat", "lon"], diff --git a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.html b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.html index 780f5fbda..5b9141a58 100644 --- a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.html +++ b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.html @@ -15,7 +15,7 @@

Seasonality and extremes of vertically propagating planetary waves

{{CASENAME}}

-Wave amplitude climatologies +<font color=navy>Wave amplitude climatologies

From left to right, these plots show climatological time series of the zonal wavenumbers 1, 2, and 3 wave amplitudes. The thick solid lines are the climatological mean, while the grey envelopes represent the 1st and 3rd quartiles across the years. The thin dashed lines are the all-time range. The top @@ -38,7 +38,7 @@

{{CASENAME}}

-50 hPa Polar Cap Eddy Heat Flux Histograms +<font color=navy>50 hPa Polar Cap Eddy Heat Flux Histograms

From left to right, these histograms show the 50 hPa polar cap eddy heat flux distributions. The vertical lines are plotted at the 10th and 90th percentiles; the numbers in the corner are the numeric values of these percentiles. These values are what have been used to define 50 hPa heat flux extremes. Keep in mind that @@ -63,7 +63,7 @@

{{CASENAME}}

-Eddy Height Composites for Heat Flux Extremes +<font color=navy>Eddy Height Composites for Heat Flux Extremes

From left to right, these maps show the eddy geopotential height climatology for the given season, the anomalous eddy heights for extreme positive 50 hPa heat flux days, and the anomalous eddy heights for extreme negative 50 hPa heat flux days. The line contours are representative of the 500 hPa level, @@ -89,7 +89,7 @@

{{CASENAME}}

-10 vs 500 hPa Correlation Coherence of Zonal Waves 1 and 2 +<font color=navy>10 vs 500 hPa Correlation Coherence of Zonal Waves 1 and 2

These plots show the correlation coherence of zonal waves 1 (left) and 2 (right) between 10 and 500 hPa for different bimonthly periods of the extended winter seasons. For negative lags, the stratospheric signal leads the troposphere, indicative of downward wave coupling @@ -110,4 +110,4 @@

{{CASENAME}}

SH Model Composites SH Reanalysis Composites
\ No newline at end of file +
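As context for the correlation-coherence description above (negative lags meaning the stratosphere leads the troposphere), a minimal sketch of lagged correlation on synthetic series; this illustrates the sign convention only and is not the POD's cross-spectral implementation:

import numpy as np

rng = np.random.default_rng(0)
strat = rng.standard_normal(500)
# Troposphere echoes the stratosphere three days later, plus noise.
trop = np.roll(strat, 3) + 0.5 * rng.standard_normal(500)

def lag_corr(x, y, lag):
    # Correlate x(t) with y(t + lag).
    if lag > 0:
        return np.corrcoef(x[:-lag], y[lag:])[0, 1]
    if lag < 0:
        return np.corrcoef(x[-lag:], y[:lag])[0, 1]
    return np.corrcoef(x, y)[0, 1]

# Maximum correlation at lag = -3: the stratospheric series leads.
print(max(range(-10, 11), key=lambda L: lag_corr(trop, strat, L)))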
diff --git a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.py b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.py index df6273b2a..d8f9087c1 100644 --- a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.py +++ b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling.py @@ -107,9 +107,9 @@ # Parse MDTF-set environment variables print('*** Parse MDTF-set environment variables ...') CASENAME = os.environ['CASENAME'] -FIRSTYR = int(os.environ['FIRSTYR']) -LASTYR = int(os.environ['LASTYR']) -WK_DIR = os.environ['WK_DIR'] +FIRSTYR = int(os.environ['startdate']) +LASTYR = int(os.environ['enddate']) +WK_DIR = os.environ['WORK_DIR'] OBS_DATA = os.environ['OBS_DATA'] z10fi = os.environ['ZG10_FILE'] @@ -184,11 +184,12 @@ can_plot_obs = True -except Exception: +except Exception as exc: msg = '*** Unable to read all of the pre-digested ERA5 data. ' +\ f'Please check that you have the pre-digested data in {OBS_DATA}' print(msg) print(traceback.format_exc()) + print(exc) can_plot_obs = False # Begin computing the necessary diagnostics @@ -215,7 +216,7 @@ # different sets of plots. Each block will iterate over making plots for the NH # and SH, and then saving the digested model data (if requested by the user) # -### BEGIN WAVE AMP CLIMO CODEBLOCK ### +# BEGIN WAVE AMP CLIMO CODEBLOCK ### hs = {60: 'N', -60: 'S'} amp_titles = '{} 60°{} GeoHgt Wave Amplitudes ({}-{})' amp_finames = '{}-60{}-wave-amps.eps' @@ -265,10 +266,10 @@ 'z_k_imag': {'dtype': 'float32'}} dat2save = xr.merge([z_k_real, z_k_imag]) dat2save.to_netcdf(outfile, encoding=encoding) -### END WAVE AMP CLIMO CODEBLOCK ### +# END WAVE AMP CLIMO CODEBLOCK ### -### BEGIN EDDY HEAT FLUX HISTO CODEBLOCK ### +# BEGIN EDDY HEAT FLUX HISTO CODEBLOCK ### hs = {1: 'NH', -1: 'SH'} seas = {1: 'JFM', -1: 'SON'} mons = {1: [1, 2, 3], -1: [9, 10, 11]} @@ -311,10 +312,10 @@ outfile = f'{data_dir}/{CASENAME}_50hPa_pcap_eddy-heat-flux.nc' encoding = {'ehf_pcap_50': {'dtype': 'float32'}} vt50_k_pcap.to_netcdf(outfile, encoding=encoding) -### END EDDY HEAT FLUX HISTO CODEBLOCK ### +# END EDDY HEAT FLUX HISTO CODEBLOCK ### -### BEGIN EDDY HEIGHT COMPOSITE CODEBLOCK ### +# BEGIN EDDY HEIGHT COMPOSITE CODEBLOCK ### ehc_titles = '{} Extreme Heat Flux Composites\n' +\ '{} Eddy Heights & Anomalies ({}, {}-{})' ehc_finames = '{}-extreme-vt-eddy-heights-{}.eps' @@ -365,7 +366,7 @@ lo_thresh = obs_lo_thresh hi_thresh = obs_hi_thresh - if (lo_thresh > 0): + if lo_thresh > 0: print('*** (WARNING) The lower heat flux threshold exceeds 0! 
Interpret results with caution!') print('*** Finding model dates of extreme pos/neg heat flux events ' + f'for {hs[hemi]} {seas[hemi]}') @@ -380,10 +381,10 @@ finame = ehc_finames.format(CASENAME, hs[hemi]) fig.savefig(plot_dir+finame, facecolor='white', dpi=150, bbox_inches='tight') -### END EDDY HEIGHT COMPOSITE CODEBLOCK ### +# END EDDY HEIGHT COMPOSITE CODEBLOCK ### -### BEGIN CORRELATION COHERENCE CODEBLOCK ### +# BEGIN CORRELATION COHERENCE CODEBLOCK ### cc_titles = '{} {} Winter Seasons ({}-{})' cc_finames = '{}-corr-coh-{}.eps' @@ -410,7 +411,7 @@ fig.savefig(plot_dir+finame, facecolor='white', dpi=150, bbox_inches='tight') # Save the relevant digested geohgt data -if SAVE_DERIVED_DATA is True: +if SAVE_DERIVED_DATA: print('*** Saving the model FFT coefficients for 45-80 lat bands') z_k_real = np.real(z_k_4580) z_k_real.name = 'z_k_real' @@ -430,7 +431,7 @@ dat2save = xr.merge([z_k_real, z_k_imag]) dat2save.hemi.attrs['long_name'] = 'hemisphere (-1 for SH, 1 for NH)' dat2save.to_netcdf(outfile, encoding=encoding) -### END CORRELATION COHERENCE CODEBLOCK ### +# END CORRELATION COHERENCE CODEBLOCK ### print('\n=====================================') print('END stc_vert_wave_coupling.py ') diff --git a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_calc.py b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_calc.py index d0d28bd27..51bc3b98a 100644 --- a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_calc.py +++ b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_calc.py @@ -57,7 +57,7 @@ def zonal_wave_coeffs(dat, keep_waves=None): Parameters ---------- - ds : `xarray.DataArray` + dat : `xarray.DataArray` The input DataArray to take the FFT on. keep_waves : list of ints (optional) or None @@ -231,10 +231,10 @@ def _sigma(s, c): """ - if (s.ndim > 1) or (c.ndim > 1): + if s.ndim > 1 or c.ndim > 1: msg = 'cross_spectral_corr._sigma: s an c must both be 1-D' raise ValueError(msg) - if (s.size != c.size): + if s.size != c.size: msg = 'cross_spectral_corr._sigma: s and c must have the same size' raise ValueError(msg) @@ -278,7 +278,7 @@ def _lin_corr_times(x, y, lags, sigma_x, sigma_y, times): """ - if (x.size != y.size): + if x.size != y.size: msg = 'cross_spectral_corr._lin_corr_times: x and y must have the same size' raise ValueError(msg) @@ -295,7 +295,7 @@ def _lin_corr_times(x, y, lags, sigma_x, sigma_y, times): # because some elements will sit next to elements that are from the next year. # The trick here is to use an array of times that match the data, and to select # only the pairs that have the given lag equal to tau. - if (tau > 0): + if tau > 0: times_x = times[0:n-tau] times_y = times[tau:n+1] good_pairs = np.where(times_y-times_x == np.timedelta64(tau, 'D')) @@ -356,4 +356,4 @@ def _lin_corr_times(x, y, lags, sigma_x, sigma_y, times): phas = np.arctan2(-qd, co) phas = xr.DataArray(phas, dims=['lag'], coords=[lags]) - return (coh, phas) + return coh, phas diff --git a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_plot.py b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_plot.py index d2dda84ff..2a7159802 100644 --- a/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_plot.py +++ b/diagnostics/stc_vert_wave_coupling/stc_vert_wave_coupling_plot.py @@ -91,7 +91,7 @@ def wave_ampl_climo_plot(z_k, lat, obs=None): xlab_pos = (np.diff(xticks)*0.5)+xticks[0:-1] # Handle the obs if given. Compute amplitudes and climo line. 
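The obs-handling code that continues immediately below computes wave amplitudes as 2*|FFT coefficient|/nlons; a quick self-contained check of that convention on a synthetic pure wave-2 field (synthetic values, not POD data):

import numpy as np

nlon = 144
lons = np.linspace(0.0, 360.0, nlon, endpoint=False)
field = 120.0 * np.cos(np.deg2rad(2.0 * lons))  # wave-2, amplitude 120 m

coeffs = np.fft.rfft(field)
amps = 2.0 * np.abs(coeffs) / nlon
print(round(float(amps[2]), 6))  # recovers ~120.0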
- if (obs is not None): + if obs is not None: obs_ampls = 2*np.abs(obs.interp(lat=lat))/obs.nlons obs_climo = obs_ampls.groupby('time.dayofyear').mean('time') obs_max_doy = int(obs_climo.dayofyear.max()) @@ -99,7 +99,7 @@ def wave_ampl_climo_plot(z_k, lat, obs=None): # Some models use different calendars (e.g., noleap, 360day). # For comparison, we will interpolate the obs to the same time # range as the input model dataset - if (obs_max_doy != max_doy): + if obs_max_doy != max_doy: obs_climo = obs_climo.assign_coords({'dayofyear': np.linspace(1, max_doy, obs_max_doy)}) obs_climo = obs_climo.interp(dayofyear=climo.dayofyear) @@ -129,7 +129,7 @@ def wave_ampl_climo_plot(z_k, lat, obs=None): # plot the climo in a thicker black line ax.plot(climo.dayofyear, climo.sel(lev=lev, zonal_wavenum=wavenum).roll(dayofyear=roll_to), color='black', linewidth=3.0, label='Climatology') - if (obs is not None): + if obs is not None: # plot the obs climo in an orange line for comparison ax.plot(climo.dayofyear, obs_climo.sel(lev=lev, zonal_wavenum=wavenum).roll(dayofyear=roll_to), color='#ff8c00', linewidth=1.0, label='Obs Climatology') @@ -141,13 +141,13 @@ def wave_ampl_climo_plot(z_k, lat, obs=None): plt.text(xlab_pos[ix]/max_doy, -0.1, xlabel, fontsize=16, ha='center', transform=ax.transAxes) ax.tick_params(axis='both', which='major', length=7, width=1.2, labelsize=14) - if (lev == 10): + if lev == 10: plt.title(f'Wave {wavenum}', fontsize=18) - if (lev == 10) and (wavenum == 3): + if lev == 10 and wavenum == 3: plt.legend(frameon=False, fontsize=13, loc='upper left') - if (wavenum == 1): + if wavenum == 1: plt.text(0.05, 0.90, f'{lev} hPa', color='red', fontsize=16, transform=ax.transAxes, fontweight='semibold') @@ -190,16 +190,16 @@ def heatflux_histo_plot(vt_k, months, hemi, obs=None): # limit the input data and obs to the specific months vt_to_plot = vt_k.where(vt_k['time.month'].isin(months), drop=True) - if (obs is not None): + if obs is not None: obs_to_plot = obs.where(obs['time.month'].isin(months), drop=True) # The histogram bins we'll plot as a function # of the hemisphere and wavenumber - if (hemi == 1): + if hemi == 1: bins = {1: np.linspace(-80, 140, 25), 2: np.linspace(-50, 100, 25), 3: np.linspace(-30, 30, 25)} - elif (hemi == -1): + elif hemi == -1: bins = {1: np.linspace(-140, 80, 25), 2: np.linspace(-100, 50, 25), 3: np.linspace(-30, 30, 25)} @@ -222,7 +222,7 @@ def heatflux_histo_plot(vt_k, months, hemi, obs=None): plt.text(0.85, 0.93, percstring, transform=ax.transAxes, fontsize=12, ha='center', va='center') # handle the obs if given; overplot similar step-histos and percentile vertical lines - if (obs is not None): + if obs is not None: obs_percs = np.percentile(obs_to_plot.sel(zonal_wavenum=wavenum), [10, 90]) percstring = f'{obs_percs[0]:0.1f}, {obs_percs[1]:0.1f}' @@ -230,7 +230,8 @@ def heatflux_histo_plot(vt_k, months, hemi, obs=None): histtype='step', color='#FF8C00', linewidth=1, label='Obs') plt.axvline(obs_percs[0], color='#FF8C00', linestyle='--', linewidth=0.7) plt.axvline(obs_percs[1], color='#FF8C00', linestyle='--', linewidth=0.7) - plt.text(0.85, 0.85, percstring, transform=ax.transAxes, color='#FF8C00', fontsize=12, ha='center', va='center') + plt.text(0.85, 0.85, percstring, transform=ax.transAxes, color='#FF8C00', fontsize=12, ha='center', + va='center') plt.xlim((bins[wavenum].min(), bins[wavenum].max())) x0, x1 = ax.get_xlim() @@ -240,10 +241,10 @@ def heatflux_histo_plot(vt_k, months, hemi, obs=None): ax.xaxis.set_major_locator(ticker.MaxNLocator(6)) 
plt.title(f'Wave {wavenum}', fontsize=18) - if (wavenum == 1): + if wavenum == 1: ax.legend(loc='center right', frameon=False) plt.ylabel('Normalized Frequency', fontsize=16) - if (wavenum == 2): + if wavenum == 2: plt.xlabel('Eddy Heat Flux due to wave-k [K m/s]', fontsize=16) fig.subplots_adjust(top=0.95, wspace=0.35, left=0.10, bottom=0.05, right=0.99) @@ -283,7 +284,7 @@ def eddy_hgt_hfevents(z10_eddy, z500_eddy, pos_dates, neg_dates, hemi): """ - if (hemi not in [-1, 1]): + if hemi not in [-1, 1]: msg = 'hemi must be either 1 (for NH) or -1 (for SH)' raise ValueError(msg) @@ -303,7 +304,7 @@ def eddy_hgt_hfevents(z10_eddy, z500_eddy, pos_dates, neg_dates, hemi): # * extreme negative heat flux composite anomalies fig = plt.figure() for i in range(3): - if (i == 0): + if i == 0: q2p_10 = z10_clim.mean('dayofyear') q2p_500 = z500_clim.mean('dayofyear') clevs10 = (np.arange(-800, 0, 100), @@ -311,7 +312,7 @@ def eddy_hgt_hfevents(z10_eddy, z500_eddy, pos_dates, neg_dates, hemi): clevs500 = (np.arange(-500, 0, 20), np.arange(20, 501, 20)) title = 'Eddy Height Climo' - elif (i == 1): + elif i == 1: q2p_10 = z10_anom.sel(time=pos_dates).mean('time') q2p_500 = z500_anom.sel(time=pos_dates).mean('time') @@ -320,7 +321,7 @@ def eddy_hgt_hfevents(z10_eddy, z500_eddy, pos_dates, neg_dates, hemi): clevs500 = (np.arange(-500, 0, 10), np.arange(10, 501, 10)) title = f'+EHF50 Days ({len(pos_dates)})' - elif (i == 2): + elif i == 2: q2p_10 = z10_anom.sel(time=neg_dates).mean('time') q2p_500 = z500_anom.sel(time=neg_dates).mean('time') @@ -381,10 +382,10 @@ def corrcoh_seasons(z_fc, hemi): # The bimonthly composites for the different # extended winter seasons of each hemisphere - if (hemi == 1): + if hemi == 1: months = [(11, 12), (12, 1), (1, 2), (2, 3), (3, 4)] labels = ['ND', 'DJ', 'JF', 'FM', 'MA'] - elif (hemi == -1): + elif hemi == -1: months = [(7, 8), (8, 9), (9, 10), (10, 11), (11, 12)] labels = ['JA', 'AS', 'SO', 'ON', 'ND'] else: @@ -404,7 +405,7 @@ def corrcoh_seasons(z_fc, hemi): # Customize the plots ax.set_title(f'Wave {wavenum}', fontsize=20) - if (wavenum == 1): + if wavenum == 1: ax.set_ylabel('Correlation Coherence (500 hPa vs 10 hPa)', fontsize=17) ax.legend(frameon=False, loc='upper left', fontsize=13) ax.set_xlabel('<- strat leads trop | Lag [days] | trop leads strat ->', fontsize=16) @@ -413,7 +414,6 @@ def corrcoh_seasons(z_fc, hemi): ax.set_xlim((-10, 10)) ax.set_xticks(np.arange(-10, 11, 2)) ax.tick_params(axis='both', which='major', length=7, width=1.2, labelsize=14) - #ax.xaxis.set_major_locator(ticker.MaxNLocator(6)) fig.subplots_adjust(wspace=0.1, left=0.075, bottom=0.1, right=0.99, top=0.85) fig.set_size_inches(14, 6) diff --git a/diagnostics/temp_extremes_distshape/ObsSubset.py b/diagnostics/temp_extremes_distshape/ObsSubset.py index 5e9412907..90ff32473 100644 --- a/diagnostics/temp_extremes_distshape/ObsSubset.py +++ b/diagnostics/temp_extremes_distshape/ObsSubset.py @@ -14,7 +14,8 @@ # This file is part of the Surface Temperature Extremes and Distribution Shape Package # and the MDTF code package. See LICENSE.txt for the license. 
# -# Subsets input observational netcdf data to seasonal temperature distribution moment text files, seasonal shift ratio text files for both sides of the temperature distribution, and seasonal netcdf files +# Subsets input observational netcdf data to seasonal temperature distribution moment text files, +# seasonal shift ratio text files for both sides of the temperature distribution, and seasonal netcdf files # # Depends on the following scripts: # (1) ObsSubset_usp.py @@ -43,47 +44,58 @@ print("**************************************************") # ====================================================================== -### Subset netcdf files to summer and winter seasons +# Subset netcdf files to summer and winter seasons subprocess.call(['./Seasonal_NCfile.sh']) # ====================================================================== -### Load user-specified parameters +# Load user-specified parameters print("Load user-specified parameters...", end=' ') -os.system("python "+os.environ["POD_HOME"]+"/ObsSubset_usp.py") -with open(os.environ["WK_DIR"]+"/ObsSubset_parameters.json") as outfile: - sub_data=json.load(outfile) +os.system("python " + os.environ["POD_HOME"] + "/ObsSubset_usp.py") +with open(os.environ["WORK_DIR"] + "/ObsSubset_parameters.json") as outfile: + sub_data = json.load(outfile) print("...Loaded!") # ====================================================================== -### List model filenames for two-meter temperature data -T2Mfile=sorted(glob.glob(sub_data["MODEL_OUTPUT_DIR"]+"/"+sub_data["MODEL"]+"*"+sub_data["T2M_VAR"]+".day.nc"))[0] +# List model filenames for two-meter temperature data +T2Mfile = \ +sorted(glob.glob(sub_data["MODEL_OUTPUT_DIR"] + "/" + sub_data["MODEL"] + "*" + sub_data["T2M_VAR"] + ".day.nc"))[0] # ====================================================================== -### Subset Data for Seasonal Temperature Moments to Text Files +# Subset Data for Seasonal Temperature Moments to Text Files # ---- Generate a map of values corresponding to land regions only by masking -msk=Region_Mask(sub_data["REGION_MASK_DIR"]+'/'+sub_data["REGION_MASK_FILENAME"],T2Mfile,sub_data["LON_VAR"],sub_data["LAT_VAR"]) +msk = Region_Mask(sub_data["REGION_MASK_DIR"] + '/' + sub_data["REGION_MASK_FILENAME"], T2Mfile, sub_data["LON_VAR"], + sub_data["LAT_VAR"]) # ====================================================================== -### Loop over each season +# Loop over each season for seasind in range(len(sub_data["monthsubs"])): # ---- Calculate seasonal moments using two-meter temperature - seas_mean,seas_std,seas_skew,lon,lat=Seasonal_Moments(T2Mfile,sub_data["LON_VAR"],sub_data["LAT_VAR"],sub_data["T2M_VAR"],sub_data["TIME_VAR"],sub_data["monthsubs"][seasind],sub_data["yearbeg"],sub_data["yearend"],msk) + seas_mean, seas_std, seas_skew, lon, lat = Seasonal_Moments(T2Mfile, sub_data["LON_VAR"], sub_data["LAT_VAR"], + sub_data["T2M_VAR"], sub_data["TIME_VAR"], + sub_data["monthsubs"][seasind], sub_data["yearbeg"], + sub_data["yearend"], msk) # ---- Save out each moment as text file - numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_moments_mean_'+sub_data["monthstrs"][seasind]+'.txt', seas_mean) - numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_moments_std_'+sub_data["monthstrs"][seasind]+'.txt', seas_std) - numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_moments_skew_'+sub_data["monthstrs"][seasind]+'.txt', seas_skew) + numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_moments_mean_' + 
sub_data["monthstrs"][ + seasind] + '.txt', seas_mean) + numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_moments_std_' + sub_data["monthstrs"][ + seasind] + '.txt', seas_std) + numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_moments_skew_' + sub_data["monthstrs"][ + seasind] + '.txt', seas_skew) # ====================================================================== # ---- Calculate two-meter temperature anomaly - T2Manom_data,lon,lat=Seasonal_Anomalies(T2Mfile,sub_data["LON_VAR"],sub_data["LAT_VAR"],sub_data["T2M_VAR"],sub_data["TIME_VAR"],sub_data["monthsubs"][seasind],sub_data["yearbeg"],sub_data["yearend"]) + T2Manom_data, lon, lat = Seasonal_Anomalies(T2Mfile, sub_data["LON_VAR"], sub_data["LAT_VAR"], sub_data["T2M_VAR"], + sub_data["TIME_VAR"], sub_data["monthsubs"][seasind], + sub_data["yearbeg"], sub_data["yearend"]) - ### Loop over each distribution tail + # Loop over each distribution tail for ptileval in sub_data["ptiles"]: # ---- Calculate underlying-to-Gaussian distribution shift ratio - shiftratio=ShiftRatio_Calc(ptileval,sub_data["shift"],msk,T2Manom_data,lon,lat) - numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_shiftratio_'+sub_data["monthstrs"][seasind]+'_'+str(ptileval)+'th-ptile.txt', seas_skew) + shiftratio = ShiftRatio_Calc(ptileval, sub_data["shift"], msk, T2Manom_data, lon, lat) + numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_shiftratio_' + sub_data["monthstrs"][ + seasind] + '_' + str(ptileval) + 'th-ptile.txt', seas_skew) -### Save out latitude/longitude values -numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_global_lons.txt',lon) -numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"]+'/'+sub_data['MODEL']+'_global_lats.txt',lat) +# Save out latitude/longitude values +numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_global_lons.txt', lon) +numpy.savetxt(sub_data["MODEL_OUTPUT_DIR"] + '/' + sub_data['MODEL'] + '_global_lats.txt', lat) diff --git a/diagnostics/temp_extremes_distshape/ObsSubset_usp.py b/diagnostics/temp_extremes_distshape/ObsSubset_usp.py index d6c8c459c..136fd79c7 100644 --- a/diagnostics/temp_extremes_distshape/ObsSubset_usp.py +++ b/diagnostics/temp_extremes_distshape/ObsSubset_usp.py @@ -14,31 +14,31 @@ # ====================================================================== # START USER SPECIFIED SECTION # ====================================================================== -### Model name and output directory -MODEL=os.environ["CASENAME"] -MODEL_OUTPUT_DIR=os.environ["DATADIR"]+"/day" +# Model name and output directory +MODEL = os.environ["CASENAME"] +MODEL_OUTPUT_DIR = os.environ["DATADIR"] + "/day" -### Variable Names -T2M_VAR=os.environ["tas_var"] -TIME_VAR=os.environ["time_coord"] -LAT_VAR=os.environ["lat_coord"] -LON_VAR=os.environ["lon_coord"] +# Variable Names +T2M_VAR = os.environ["tas_var"] +TIME_VAR = os.environ["time_coord"] +LAT_VAR = os.environ["lat_coord"] +LON_VAR = os.environ["lon_coord"] -### Set shift for non-Gaussian tail calculations -shift=0.5 +# Set shift for non-Gaussian tail calculations +shift = 0.5 -### Set seasons and percentiles to identify distribution tails -monthsubs=[[6,7,8],[12,1,2]] -monthstrs=['JJA','DJF'] -ptiles=[5,95] +# Set seasons and percentiles to identify distribution tails +monthsubs = [[6, 7, 8], [12, 1, 2]] +monthstrs = ['JJA', 'DJF'] +ptiles = [5, 95] -### Set range of years, season, and tail percentile threshold for calculations -yearbeg=int(os.environ["FIRSTYR"]) 
-yearend=int(os.environ["LASTYR"]) +# Set range of years, season, and tail percentile threshold for calculations +yearbeg = int(os.environ["startdate"]) +yearend = int(os.environ["enddate"]) -### Region mask directory & filename -REGION_MASK_DIR=os.environ["OBS_DATA"] -REGION_MASK_FILENAME="MERRA2_landmask.mat" +# Region mask directory & filename +REGION_MASK_DIR = os.environ["OBS_DATA"] +REGION_MASK_FILENAME = "MERRA2_landmask.mat" # ====================================================================== # END USER SPECIFIED SECTION @@ -49,39 +49,39 @@ # DO NOT MODIFY CODE BELOW # ====================================================================== -data={} +data = {} -data["MODEL"]=MODEL -data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR -data["REGION_MASK_DIR"]=REGION_MASK_DIR -data["REGION_MASK_FILENAME"]=REGION_MASK_FILENAME -data["shift"]=shift -data["yearbeg"]=yearbeg -data["yearend"]=yearend -data["TIME_VAR"]=TIME_VAR -data["LAT_VAR"]=LAT_VAR -data["LON_VAR"]=LON_VAR -data["T2M_VAR"]=T2M_VAR -data["monthsubs"]=monthsubs -data["monthstrs"]=monthstrs -data["ptiles"]=ptiles +data["MODEL"] = MODEL +data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR +data["REGION_MASK_DIR"] = REGION_MASK_DIR +data["REGION_MASK_FILENAME"] = REGION_MASK_FILENAME +data["shift"] = shift +data["yearbeg"] = yearbeg +data["yearend"] = yearend +data["TIME_VAR"] = TIME_VAR +data["LAT_VAR"] = LAT_VAR +data["LON_VAR"] = LON_VAR +data["T2M_VAR"] = T2M_VAR +data["monthsubs"] = monthsubs +data["monthstrs"] = monthstrs +data["ptiles"] = ptiles # Taking care of function arguments -data["args1"]=[ \ -REGION_MASK_DIR, \ -REGION_MASK_FILENAME, \ -MODEL_OUTPUT_DIR, \ -MODEL, \ -shift, \ -yearbeg, \ -yearend, \ -monthsubs, \ -monthstrs, \ -ptiles, \ -TIME_VAR, \ -T2M_VAR, \ -LAT_VAR, \ -LON_VAR ] +data["args1"] = [ + REGION_MASK_DIR, + REGION_MASK_FILENAME, + MODEL_OUTPUT_DIR, + MODEL, + shift, + yearbeg, + yearend, + monthsubs, + monthstrs, + ptiles, + TIME_VAR, + T2M_VAR, + LAT_VAR, + LON_VAR] -with open(os.environ["WK_DIR"]+"/ObsSubset_parameters.json", "w") as outfile: - json.dump(data, outfile) \ No newline at end of file +with open(os.environ["WORK_DIR"] + "/ObsSubset_parameters.json", "w") as outfile: + json.dump(data, outfile) diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps.py index bd8a1951d..35d8cdc40 100644 --- a/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps.py +++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps.py @@ -14,16 +14,21 @@ # This file is part of the Surface Temperature Extremes and Distribution Shape Package # and the MDTF code package. See LICENSE.txt for the license. # -# Composites the seasonal circulation patterns associated with days in Non-Gaussian distribution tails at specified locations, following Loikith and Neelin (2019), Loikith et al. (2018), Loikith and Neelin (2015), Ruff and Neelin (2012) +# Composites the seasonal circulation patterns associated with days in Non-Gaussian distribution tails at +# specified locations, following Loikith and Neelin (2019), Loikith et al. 
(2018), Loikith and Neelin (2015), +# Ruff and Neelin (2012) # -# Generates spatial plot of seasonal composites at lag times to days identified as function of two-meter temperature, sea level pressure, and 500hPa geopotential height +# Generates spatial plot of seasonal composites at lag times to days identified as function of two-meter temperature, +# sea level pressure, and 500hPa geopotential height # # Depends on the following scripts: # (1) TempExtDistShape_CircComps_usp.py # (2) TempExtDistShape_CircComps_util.py # -# Defaults for location, plotting parameters, etc. that can be altered by user are in TempExtDistShape_CircComps_usp.py -# Defaults for season, tail percentile threshold, range of years, etc. that can be altered by user, are in TempExtDistShape_SeasonAndTail_usp.py +# Defaults for location, plotting parameters, etc. that can be altered by user are in +# TempExtDistShape_CircComps_usp.py +# Defaults for season, tail percentile threshold, range of years, etc. that can be altered by user, are in +# TempExtDistShape_SeasonAndTail_usp.py # # Utility functions are defined in TempExtDistShape_CircComps_util.py # @@ -33,6 +38,7 @@ import os import json import matplotlib + matplotlib.use('Agg') import matplotlib.pyplot as mplt import numpy @@ -51,69 +57,99 @@ print("**************************************************") # ====================================================================== -### Load user-specified parameters (usp) for calcluating and plotting shift ratio +# Load user-specified parameters (usp) for calcluating and plotting shift ratio print("Load user-specified parameters...") -os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_CircComps_usp.py") -with open(os.environ["WK_DIR"]+"/TempExtDistShape_CircComps_parameters.json") as outfile: - circ_data=json.load(outfile) +os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_CircComps_usp.py") +with open(os.environ["WORK_DIR"] + "/TempExtDistShape_CircComps_parameters.json") as outfile: + circ_data = json.load(outfile) print("...Loaded!") -monthsub=json.loads(circ_data["monthsub"]) #change unicode string into array of integers +monthsub = json.loads(circ_data["monthsub"]) # change unicode string into array of integers # ====================================================================== -### List model filenames based on variable of interest -T2Mfile=sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"]+"/"+circ_data["MODEL"]+"*"+circ_data["T2M_VAR"]+".day.nc"))[0] -SLPfile=sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"]+"/"+circ_data["MODEL"]+"*"+circ_data["SLP_VAR"]+".day.nc"))[0] -Z500file=sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"]+"/"+circ_data["MODEL"]+"*"+circ_data["Z500_VAR"]+".day.nc"))[0] +# List model filenames based on variable of interest +T2Mfile = \ +sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"] + "/" + circ_data["MODEL"] + "*" + circ_data["T2M_VAR"] + ".day.nc"))[0] +SLPfile = \ +sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"] + "/" + circ_data["MODEL"] + "*" + circ_data["SLP_VAR"] + ".day.nc"))[0] +Z500file = \ +sorted(glob.glob(circ_data["MODEL_OUTPUT_DIR"] + "/" + circ_data["MODEL"] + "*" + circ_data["Z500_VAR"] + ".day.nc"))[0] # ====================================================================== -### Calculate seasonal subset for each variable and rename from original file -T2M_data,lon,lat,datearrstr,T2M_units=Seasonal_Subset(T2Mfile,circ_data["LON_VAR"],circ_data["LAT_VAR"],circ_data["T2M_VAR"],circ_data["TIME_VAR"],monthsub,circ_data["yearbeg"],circ_data["yearend"]) 
-T2M_VAR='T2M' -SLP_data,lon,lat,datearrstr,SLP_units=Seasonal_Subset(SLPfile,circ_data["LON_VAR"],circ_data["LAT_VAR"],circ_data["SLP_VAR"],circ_data["TIME_VAR"],monthsub,circ_data["yearbeg"],circ_data["yearend"]) -SLP_VAR='SLP' -Z500_data,lon,lat,datearrstr,Z500_units=Seasonal_Subset(Z500file,circ_data["LON_VAR"],circ_data["LAT_VAR"],circ_data["Z500_VAR"],circ_data["TIME_VAR"],monthsub,circ_data["yearbeg"],circ_data["yearend"]) -Z500_VAR='Z500' +# Calculate seasonal subset for each variable and rename from original file +T2M_data, lon, lat, datearrstr, T2M_units = Seasonal_Subset(T2Mfile, circ_data["LON_VAR"], circ_data["LAT_VAR"], + circ_data["T2M_VAR"], circ_data["TIME_VAR"], monthsub, + circ_data["yearbeg"], circ_data["yearend"]) +T2M_VAR = 'T2M' +SLP_data, lon, lat, datearrstr, SLP_units = Seasonal_Subset(SLPfile, circ_data["LON_VAR"], circ_data["LAT_VAR"], + circ_data["SLP_VAR"], circ_data["TIME_VAR"], monthsub, + circ_data["yearbeg"], circ_data["yearend"]) +SLP_VAR = 'SLP' +Z500_data, lon, lat, datearrstr, Z500_units = Seasonal_Subset(Z500file, circ_data["LON_VAR"], circ_data["LAT_VAR"], + circ_data["Z500_VAR"], circ_data["TIME_VAR"], monthsub, + circ_data["yearbeg"], circ_data["yearend"]) +Z500_VAR = 'Z500' # ====================================================================== -### Calculate variable anomalies for temperature and 500hPa geopotential height -T2Manom_data=Variable_Anomaly(T2M_data,lon,lat,datearrstr) -Z500anom_data=Variable_Anomaly(Z500_data,lon,lat,datearrstr) +# Calculate variable anomalies for temperature and 500hPa geopotential height +T2Manom_data = Variable_Anomaly(T2M_data, lon, lat, datearrstr) +Z500anom_data = Variable_Anomaly(Z500_data, lon, lat, datearrstr) # ====================================================================== -### Composite seasonal circulation at specified location for lag times to days in the non-Gaussian distribution tail -tail_days_lags,statlonind,statlatind,T2M_data,SLP_data,Z500_data,Z500_units,SLP_units,T2M_units=Circ_Comp_Lags(T2Manom_data,T2M_data,T2M_units,SLP_data,SLP_units,Z500_data,Z500_units,lat,lon,circ_data["ptile"],circ_data["statlat"],circ_data["statlon"],circ_data["lagtot"],circ_data["lagstep"]) +# Composite seasonal circulation at specified location for lag times to days in the non-Gaussian distribution tail +(tail_days_lags, statlonind, statlatind, T2M_data, SLP_data, Z500_data, Z500_units, SLP_units, + T2M_units) = Circ_Comp_Lags( + T2Manom_data, T2M_data, T2M_units, SLP_data, SLP_units, Z500_data, Z500_units, lat, lon, circ_data["ptile"], + circ_data["statlat"], circ_data["statlon"], circ_data["lagtot"], circ_data["lagstep"]) # ====================================================================== -### Plot seasonal circulation at specified location for lag times to days in the non-Gaussian distribution tail +# Plot seasonal circulation at specified location for lag times to days in the non-Gaussian distribution tail # ----- values 0,1,2 specify columns of plot for temperature, sea level pressure, and geopotential height, respectively # ----- sea level pressure panels do not include anomalies, so zeroes are passed to the function where applicable print("Plotting Circulation Composites...") -fig, axes = mplt.subplots(len(numpy.arange(0,circ_data["lagtot"]+circ_data["lagstep"],circ_data["lagstep"])), 3, sharex='all',figsize=(30,15),subplot_kw=dict(projection=ccrs.PlateCarree(central_longitude=lon[statlonind]))) -subplotnum=1 -for lag in 
numpy.arange(0,circ_data["lagtot"]+circ_data["lagstep"],circ_data["lagstep"]): - figstep=lag//2 - - ### Correct for lags outside season - newtailinds=Lag_Correct(lag,figstep,tail_days_lags,datearrstr,circ_data["lagstep"],monthsub) - col1=Plot_Circ_Comp_Lags(T2Mfile,circ_data["LON_VAR"],figstep,0,lon,lat,newtailinds,T2M_data,T2Manom_data,T2M_VAR,circ_data["Tminval"],circ_data["Tmaxval"],circ_data["Trangestep"],lag,circ_data["Tanomminval"],circ_data["Tanommaxval"],circ_data["Tanomrangestep"],statlonind,statlatind,axes,fig) - col2=Plot_Circ_Comp_Lags(SLPfile,circ_data["LON_VAR"],figstep,1,lon,lat,newtailinds,SLP_data,0,SLP_VAR,circ_data["SLPminval"],circ_data["SLPmaxval"],circ_data["SLPrangestep"],lag,0,0,0,statlonind,statlatind,axes,fig) - col3=Plot_Circ_Comp_Lags(Z500file,circ_data["LON_VAR"],figstep,2,lon,lat,newtailinds,Z500_data,Z500anom_data,Z500_VAR,circ_data["Z500minval"],circ_data["Z500maxval"],circ_data["Z500rangestep"],lag,circ_data["Z500anomminval"],circ_data["Z500anommaxval"],circ_data["Z500anomrangestep"],statlonind,statlatind,axes,fig) - -### Format colorbars -Set_Colorbars(circ_data["Tminval"],circ_data["Tmaxval"],circ_data["Tcbarstep"],col1,circ_data["lagtot"]//2,0,T2M_units,axes,fig) -Set_Colorbars(circ_data["SLPminval"],circ_data["SLPmaxval"],circ_data["SLPcbarstep"],col2,circ_data["lagtot"]//2,1,SLP_units,axes,fig) -Set_Colorbars(circ_data["Z500minval"],circ_data["Z500maxval"],circ_data["Z500cbarstep"],col3,circ_data["lagtot"]//2,2,Z500_units,axes,fig) - -### Format subplot spacing +fig, axes = mplt.subplots(len(numpy.arange(0, circ_data["lagtot"] + circ_data["lagstep"], circ_data["lagstep"])), + 3, + sharex='all', figsize=(30, 15), + subplot_kw=dict(projection=ccrs.PlateCarree(central_longitude=lon[statlonind]))) +subplotnum = 1 +for lag in numpy.arange(0, circ_data["lagtot"] + circ_data["lagstep"], circ_data["lagstep"]): + figstep = lag // 2 + + # Correct for lags outside season + newtailinds = Lag_Correct(lag, figstep, tail_days_lags, datearrstr, circ_data["lagstep"], monthsub) + col1 = Plot_Circ_Comp_Lags(T2Mfile, circ_data["LON_VAR"], figstep, 0, lon, lat, newtailinds, T2M_data, T2Manom_data, + T2M_VAR, circ_data["Tminval"], circ_data["Tmaxval"], circ_data["Trangestep"], lag, + circ_data["Tanomminval"], circ_data["Tanommaxval"], circ_data["Tanomrangestep"], + statlonind, statlatind, axes, fig) + col2 = Plot_Circ_Comp_Lags(SLPfile, circ_data["LON_VAR"], figstep, 1, lon, lat, newtailinds, SLP_data, + 0, SLP_VAR, + circ_data["SLPminval"], circ_data["SLPmaxval"], circ_data["SLPrangestep"], lag, + 0, 0, 0, + statlonind, statlatind, axes, fig) + col3 = Plot_Circ_Comp_Lags(Z500file, circ_data["LON_VAR"], figstep, 2, lon, lat, newtailinds, Z500_data, + Z500anom_data, Z500_VAR, circ_data["Z500minval"], circ_data["Z500maxval"], + circ_data["Z500rangestep"], lag, circ_data["Z500anomminval"], + circ_data["Z500anommaxval"], circ_data["Z500anomrangestep"], statlonind, statlatind, + axes, fig) + +# Format colorbars +Set_Colorbars(circ_data["Tminval"], circ_data["Tmaxval"], circ_data["Tcbarstep"], col1, circ_data["lagtot"] // 2, + 0, + T2M_units, axes, fig) +Set_Colorbars(circ_data["SLPminval"], circ_data["SLPmaxval"], circ_data["SLPcbarstep"], col2, circ_data["lagtot"] // 2, + 1, SLP_units, axes, fig) +Set_Colorbars(circ_data["Z500minval"], circ_data["Z500maxval"], circ_data["Z500cbarstep"], col3, + circ_data["lagtot"] // 2, 2, Z500_units, axes, fig) + +# Format subplot spacing fig.subplots_adjust(wspace=0.06, hspace=0.02) -### Save figure to png +# Save figure to png 
fig.canvas.draw() -fig.savefig(circ_data["FIG_OUTPUT_DIR"]+"/"+circ_data["FIG_OUTPUT_FILENAME"],bbox_inches='tight') +fig.savefig(circ_data["FIG_OUTPUT_DIR"] + "/" + circ_data["FIG_OUTPUT_FILENAME"], bbox_inches='tight') print("...Completed!") -print(" Figure saved as "+circ_data["FIG_OUTPUT_DIR"]+"/"+circ_data["FIG_OUTPUT_FILENAME"]+"!") +print(" Figure saved as " + circ_data["FIG_OUTPUT_DIR"] + "/" + circ_data["FIG_OUTPUT_FILENAME"] + "!") # ====================================================================== diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps_usp.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps_usp.py index 37affd392..05e260454 100644 --- a/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps_usp.py +++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_CircComps_usp.py @@ -14,63 +14,63 @@ # ====================================================================== # START USER SPECIFIED SECTION # ====================================================================== -### Model name and output directory -MODEL=os.environ["CASENAME"] -MODEL_OUTPUT_DIR=os.environ["DATADIR"]+"/day" - -### Variable names -T2M_VAR=os.environ["tas_var"] -SLP_VAR=os.environ["psl_var"] -Z500_VAR=os.environ["zg_var"] -TIME_VAR=os.environ["time_coord"] -LAT_VAR=os.environ["lat_coord"] -LON_VAR=os.environ["lon_coord"] - -### Set range of years, season, and tail percentile threshold for calculations -yearbeg=int(os.environ["FIRSTYR"]) -yearend=int(os.environ["LASTYR"]) -monthstr=os.environ["monthstr"] -monthsub=os.environ["monthsub"] -ptile=int(os.environ["ptile"]) - -### Location information, use "-" or "_" instead of spaces in city name -city=os.environ["city"] -if city == "Yellowknife": #DJF - statlat=62.4540 - statlon=-114.3718 -elif city == "Rennes": #JJA - statlat=48.0698 - statlon=-1.7344 - -### Plotting parameters -lagstep=2 #number of days between lags -lagtot=4 #maximum lag prior to t=0 -SLPminval=980 -SLPmaxval=1040 -SLPrangestep=1 -SLPcbarstep=4 #different range for colorbar ticks -Tminval=-40 -Tmaxval=20 -Trangestep=2 -Tcbarstep=4 #different range for colorbar ticks -Tanomminval=-50 -Tanommaxval=50 -Tanomrangestep=0.5 -Z500minval=5000 -Z500maxval=5800 -Z500rangestep=50 -Z500cbarstep=100 #different range for colorbar ticks -Z500anomminval=-2 -Z500anommaxval=2 -Z500anomrangestep=0.1 - -### Model output figure -FIG_OUTPUT_DIR=os.environ["WK_DIR"]+"/model" -FIG_OUTPUT_FILENAME="CircComps_"+city+"_"+monthstr+".png" - -### Reanalysis output figure for comparisons -FIG_OBS_DIR=os.environ["WK_DIR"]+"/obs" -FIG_OBS_FILENAME="MERRA2_198001-200912_res=0.5-0.66.CircComps_"+city+"_"+monthstr+".png" +# Model name and output directory +MODEL = os.environ["CASENAME"] +MODEL_OUTPUT_DIR = os.environ["DATADIR"] + "/day" + +# Variable names +T2M_VAR = os.environ["tas_var"] +SLP_VAR = os.environ["psl_var"] +Z500_VAR = os.environ["zg_var"] +TIME_VAR = os.environ["time_coord"] +LAT_VAR = os.environ["lat_coord"] +LON_VAR = os.environ["lon_coord"] + +# Set range of years, season, and tail percentile threshold for calculations +yearbeg = int(os.environ["startdate"]) +yearend = int(os.environ["enddate"]) +monthstr = os.environ["monthstr"] +monthsub = os.environ["monthsub"] +ptile = int(os.environ["ptile"]) + +# Location information, use "-" or "_" instead of spaces in city name +city = os.environ["city"] +if city == "Yellowknife": # DJF + statlat = 62.4540 + statlon = -114.3718 +elif city == "Rennes": # JJA + statlat = 48.0698 + statlon = -1.7344 + +# 
+# Plotting parameters
+lagstep = 2  # number of days between lags
+lagtot = 4  # maximum lag prior to t=0
+SLPminval = 980
+SLPmaxval = 1040
+SLPrangestep = 1
+SLPcbarstep = 4  # different range for colorbar ticks
+Tminval = -40
+Tmaxval = 20
+Trangestep = 2
+Tcbarstep = 4  # different range for colorbar ticks
+Tanomminval = -50
+Tanommaxval = 50
+Tanomrangestep = 0.5
+Z500minval = 5000
+Z500maxval = 5800
+Z500rangestep = 50
+Z500cbarstep = 100  # different range for colorbar ticks
+Z500anomminval = -2
+Z500anommaxval = 2
+Z500anomrangestep = 0.1
+
+# Model output figure
+FIG_OUTPUT_DIR = os.environ["WORK_DIR"] + "/model"
+FIG_OUTPUT_FILENAME = "CircComps_" + city + "_" + monthstr + ".png"
+
+# Reanalysis output figure for comparisons
+FIG_OBS_DIR = os.environ["WORK_DIR"] + "/obs"
+FIG_OBS_FILENAME = "MERRA2_198001-200912_res=0.5-0.66.CircComps_" + city + "_" + monthstr + ".png"
# ======================================================================
# END USER SPECIFIED SECTION
@@ -80,99 +80,98 @@
# ======================================================================
# DO NOT MODIFY CODE BELOW
# ======================================================================
-data={}
-
-data["MODEL"]=MODEL
-data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR
-
-### File names
-data["FIG_OUTPUT_DIR"]=FIG_OUTPUT_DIR
-data["FIG_OUTPUT_FILENAME"]=FIG_OUTPUT_FILENAME
-data["FIG_OBS_DIR"]=FIG_OBS_DIR
-data["FIG_OBS_FILENAME"]=FIG_OBS_FILENAME
-
-### Variable names
-data["T2M_VAR"]=T2M_VAR
-data["SLP_VAR"]=SLP_VAR
-data["Z500_VAR"]=Z500_VAR
-data["TIME_VAR"]=TIME_VAR
-data["LAT_VAR"]=LAT_VAR
-data["LON_VAR"]=LON_VAR
-
-data["city"]=city
-data["statlat"]=statlat
-data["statlon"]=statlon
-
-data["yearbeg"]=yearbeg
-data["yearend"]=yearend
-data["monthsub"]=monthsub
-data["monthstr"]=monthstr
-data["ptile"]=ptile
-
-data["lagstep"]=lagstep
-data["lagtot"]=lagtot
-data["SLPminval"]=SLPminval
-data["SLPmaxval"]=SLPmaxval
-data["SLPrangestep"]=SLPrangestep
-data["SLPcbarstep"]=SLPcbarstep
-data["Tminval"]=Tminval
-data["Tmaxval"]=Tmaxval
-data["Trangestep"]=Trangestep
-data["Tcbarstep"]=Tcbarstep
-data["Tanomminval"]=Tanomminval
-data["Tanommaxval"]=Tanommaxval
-data["Tanomrangestep"]=Tanomrangestep
-data["Z500minval"]=Z500minval
-data["Z500maxval"]=Z500maxval
-data["Z500rangestep"]=Z500rangestep
-data["Z500cbarstep"]=Z500cbarstep
-data["Z500anomminval"]=Z500anomminval
-data["Z500anommaxval"]=Z500anommaxval
-data["Z500anomrangestep"]=Z500anomrangestep
+data = {}
+
+data["MODEL"] = MODEL
+data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR
+
+# File names
+data["FIG_OUTPUT_DIR"] = FIG_OUTPUT_DIR
+data["FIG_OUTPUT_FILENAME"] = FIG_OUTPUT_FILENAME
+data["FIG_OBS_DIR"] = FIG_OBS_DIR
+data["FIG_OBS_FILENAME"] = FIG_OBS_FILENAME
+
+# Variable names
+data["T2M_VAR"] = T2M_VAR
+data["SLP_VAR"] = SLP_VAR
+data["Z500_VAR"] = Z500_VAR
+data["TIME_VAR"] = TIME_VAR
+data["LAT_VAR"] = LAT_VAR
+data["LON_VAR"] = LON_VAR
+
+data["city"] = city
+data["statlat"] = statlat
+data["statlon"] = statlon
+
+data["yearbeg"] = yearbeg
+data["yearend"] = yearend
+data["monthsub"] = monthsub
+data["monthstr"] = monthstr
+data["ptile"] = ptile
+
+data["lagstep"] = lagstep
+data["lagtot"] = lagtot
+data["SLPminval"] = SLPminval
+data["SLPmaxval"] = SLPmaxval
+data["SLPrangestep"] = SLPrangestep
+data["SLPcbarstep"] = SLPcbarstep
+data["Tminval"] = Tminval
+data["Tmaxval"] = Tmaxval
+data["Trangestep"] = Trangestep
+data["Tcbarstep"] = Tcbarstep
+data["Tanomminval"] = Tanomminval
+data["Tanommaxval"] = Tanommaxval
+data["Tanomrangestep"] = Tanomrangestep
+data["Z500minval"] = Z500minval
+data["Z500maxval"] = Z500maxval
+data["Z500rangestep"] = Z500rangestep
+data["Z500cbarstep"] = Z500cbarstep
+data["Z500anomminval"] = Z500anomminval
+data["Z500anommaxval"] = Z500anommaxval
+data["Z500anomrangestep"] = Z500anomrangestep
# Taking care of function arguments for calculating circulation composites
-data["args1"]=[ \
-city, \
-statlat, \
-statlon, \
-lagstep, \
-lagtot, \
-yearbeg, \
-yearend, \
-monthsub, \
-monthstr, \
-ptile, \
-SLPminval, \
-SLPmaxval, \
-SLPrangestep, \
-SLPcbarstep, \
-Tminval, \
-Tmaxval, \
-Trangestep, \
-Tcbarstep, \
-Tanomminval, \
-Tanommaxval, \
-Tanomrangestep, \
-Z500minval, \
-Z500maxval, \
-Z500rangestep, \
-Z500cbarstep, \
-Z500anomminval, \
-Z500anommaxval, \
-Z500anomrangestep, \
-MODEL_OUTPUT_DIR, \
-MODEL, \
-FIG_OUTPUT_FILENAME, \
-FIG_OUTPUT_DIR, \
-FIG_OBS_FILENAME, \
-FIG_OBS_DIR, \
-TIME_VAR, \
-T2M_VAR, \
-SLP_VAR, \
-Z500_VAR, \
-LAT_VAR, \
-LON_VAR ]
-
-with open(os.environ["WK_DIR"]+"/TempExtDistShape_CircComps_parameters.json", "w") as outfile:
+data["args1"] = [
+    city,
+    statlat,
+    statlon,
+    lagstep,
+    lagtot,
+    yearbeg,
+    yearend,
+    monthsub,
+    monthstr,
+    ptile,
+    SLPminval,
+    SLPmaxval,
+    SLPrangestep,
+    SLPcbarstep,
+    Tminval,
+    Tmaxval,
+    Trangestep,
+    Tcbarstep,
+    Tanomminval,
+    Tanommaxval,
+    Tanomrangestep,
+    Z500minval,
+    Z500maxval,
+    Z500rangestep,
+    Z500cbarstep,
+    Z500anomminval,
+    Z500anommaxval,
+    Z500anomrangestep,
+    MODEL_OUTPUT_DIR,
+    MODEL,
+    FIG_OUTPUT_FILENAME,
+    FIG_OUTPUT_DIR,
+    FIG_OBS_FILENAME,
+    FIG_OBS_DIR,
+    TIME_VAR,
+    T2M_VAR,
+    SLP_VAR,
+    Z500_VAR,
+    LAT_VAR,
+    LON_VAR]
+
+with open(os.environ["WORK_DIR"] + "/TempExtDistShape_CircComps_parameters.json", "w") as outfile:
    json.dump(data, outfile)
-
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist.py
index fa247a9c6..b9e64e5cd 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist.py
@@ -14,9 +14,12 @@
# This file is part of the Surface Temperature Extremes and Distribution Shape Package
# and the MDTF code package. See LICENSE.txt for the license.
#
-# Computes the Gaussian distribution fit to the histogram of seasonal two-meter temperature anomalies at specified locations following Loikith and Neelin (2019), Loikith et al. (2018), Loikith and Neelin (2015), Ruff and Neelin (2012)
+# Computes the Gaussian distribution fit to the histogram of seasonal two-meter temperature anomalies at
+# specified locations following Loikith and Neelin (2019), Loikith et al. (2018), Loikith and Neelin (2015),
+# Ruff and Neelin (2012)
#
-# Generates plot of the distribution and associated Gaussian fit over season specified as function of two-meter temperature
+# Generates plot of the distribution and associated Gaussian fit over season specified as function of
+# two-meter temperature
#
# Depends on the following scripts:
# (1) TempExtDistShape_FreqDist_usp.py
@@ -32,6 +35,7 @@
import os
import json
import matplotlib
+
matplotlib.use('Agg')
import matplotlib.pyplot as mplt
import numpy
@@ -46,54 +50,63 @@
print("**************************************************")
# ======================================================================
-### Load user-specified parameters (usp) for calcluating and plotting shift ratio
+# Load user-specified parameters (usp) for calculating and plotting the Gaussian fit
print("Load user-specified parameters including season...")
-os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_FreqDist_usp.py")
-with open(os.environ["WK_DIR"]+"/TempExtDistShape_FreqDist_parameters.json") as outfile:
-    freq_data=json.load(outfile)
+os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_FreqDist_usp.py")
+with open(os.environ["WORK_DIR"] + "/TempExtDistShape_FreqDist_parameters.json") as outfile:
+    freq_data = json.load(outfile)
print("...Loaded!")
# ======================================================================
-### List model filenames for two-meter temperature
-T2Mfile=sorted(glob.glob(freq_data["MODEL_OUTPUT_DIR"]+"/"+freq_data["MODEL"]+"*"+freq_data["T2M_VAR"]+".day.nc"))[0]
+# List model filenames for two-meter temperature
+T2Mfile = (
+    sorted(glob.glob(freq_data["MODEL_OUTPUT_DIR"] + "/" + freq_data["MODEL"] + "*" + freq_data["T2M_VAR"] + ".day.nc")))[0]
# ======================================================================
-### Estimate and plot Gaussian fit to two-meter temperature distribution at specified locations, save figure in wkdir/TempExtDistShape/
+# Estimate and plot Gaussian fit to two-meter temperature distribution at specified locations,
+# save figure in wkdir/TempExtDistShape/
# ----- Set figure prior to looping over each city and adding to each subplot
# ----- Large subplot 111 added to display overall x and y labels properly in plotting
-fig = mplt.figure(figsize=(9,10))
+fig = mplt.figure(figsize=(9, 10))
ax_lg = fig.add_subplot(111)
-for statind in numpy.arange(0,len(freq_data["citynames"])):
-
+for statind in numpy.arange(0, len(freq_data["citynames"])):
    # ======================================================================
-    ### Calculate seasonal anomalies for two-meter temperature
-    Tanom_data,lon,lat=Seasonal_Anomalies(T2Mfile,freq_data["LON_VAR"],freq_data["LAT_VAR"],freq_data["T2M_VAR"],freq_data["TIME_VAR"],freq_data["monthsub"],freq_data["yearbeg"],freq_data["yearend"],statind)
+    # Calculate seasonal anomalies for two-meter temperature
+    Tanom_data, lon, lat = Seasonal_Anomalies(T2Mfile, freq_data["LON_VAR"], freq_data["LAT_VAR"], freq_data["T2M_VAR"],
+                                              freq_data["TIME_VAR"], freq_data["monthsub"], freq_data["yearbeg"],
+                                              freq_data["yearend"], statind)
    # ======================================================================
-    ### Estimate Gaussian fit to two-meter temperature distribution at specified location
-    bin_centers_gauss,bin_centers,bin_counts,gauss_fit,Tanom_stat=Gaussfit_Est(Tanom_data,lat,lon,statind,freq_data["statlats"],freq_data["statlons"],freq_data["citynames"],freq_data["binwidth"])
+    # Estimate Gaussian fit to two-meter temperature distribution at specified location
+    bin_centers_gauss, bin_centers, bin_counts, gauss_fit, Tanom_stat = Gaussfit_Est(Tanom_data, lat, lon, statind,
                                                                                     freq_data["statlats"],
                                                                                     freq_data["statlons"],
                                                                                     freq_data["citynames"],
                                                                                     freq_data["binwidth"])
    # ======================================================================
-    ### Plot two-meter temperature distribution and estimated Gaussian fit computed above at specified location, save to wkdir/TempExtDistShape
-    Gaussfit_Plot(fig,bin_centers,bin_counts,bin_centers_gauss,gauss_fit,Tanom_stat,freq_data["ptile"],freq_data["citynames"],freq_data["monthstr"],statind,freq_data["plotrows"],freq_data["plotcols"])
+    # Plot two-meter temperature distribution and estimated Gaussian fit computed above at specified location,
+    # save to wkdir/TempExtDistShape
+    Gaussfit_Plot(fig, bin_centers, bin_counts, bin_centers_gauss, gauss_fit, Tanom_stat, freq_data["ptile"],
+                  freq_data["citynames"], freq_data["monthstr"], statind, freq_data["plotrows"], freq_data["plotcols"])
-### Turn off axis lines and ticks of the big subplot and set x & y labels
+# Turn off axis lines and ticks of the big subplot and set x & y labels
ax_lg.spines['top'].set_color('none')
ax_lg.spines['bottom'].set_color('none')
ax_lg.spines['left'].set_color('none')
ax_lg.spines['right'].set_color('none')
ax_lg.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
-ax_lg.set_ylabel('Normalized Frequency',fontsize=14,labelpad=30)
-ax_lg.set_xlabel('Temperature Anomaly ('+u"\u00b0"+'C)',fontsize=14,labelpad=16)
+ax_lg.set_ylabel('Normalized Frequency', fontsize=14, labelpad=30)
+ax_lg.set_xlabel('Temperature Anomaly (' + u"\u00b0" + 'C)', fontsize=14, labelpad=16)
-### Format subplot spacing
+# Format subplot spacing
fig.subplots_adjust(wspace=0.2, hspace=0.25)
-### Save figure to PDF
+# Save figure to PDF
fig.canvas.draw()
-fig.savefig(freq_data["FIG_OUTPUT_DIR"]+"/"+freq_data["FIG_OUTPUT_FILENAME"],bbox_inches='tight')
-print(" Figure saved as "+freq_data["FIG_OUTPUT_DIR"]+'/'+freq_data["FIG_OUTPUT_FILENAME"]+"!")
+fig.savefig(freq_data["FIG_OUTPUT_DIR"] + "/" + freq_data["FIG_OUTPUT_FILENAME"], bbox_inches='tight')
+print(" Figure saved as " + freq_data["FIG_OUTPUT_DIR"] + '/' + freq_data["FIG_OUTPUT_FILENAME"] + "!")
# ======================================================================
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_usp.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_usp.py
index 546809b4d..c3a7f0151 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_usp.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_usp.py
@@ -14,40 +14,40 @@
# ======================================================================
# START USER SPECIFIED SECTION
# ======================================================================
-### Model name and output directory
-MODEL=os.environ["CASENAME"]
-MODEL_OUTPUT_DIR=os.environ["DATADIR"]+"/day"
+# Model name and output directory
+MODEL = os.environ["CASENAME"]
+MODEL_OUTPUT_DIR = os.environ["DATADIR"] + "/day"
-### Variable Names
-T2M_VAR=os.environ["tas_var"]
-TIME_VAR=os.environ["time_coord"]
-LAT_VAR=os.environ["lat_coord"]
-LON_VAR=os.environ["lon_coord"]
+# Variable Names
+T2M_VAR = os.environ["tas_var"]
+TIME_VAR = os.environ["time_coord"]
+LAT_VAR = os.environ["lat_coord"]
+LON_VAR = os.environ["lon_coord"]
-### Locations and associated coordinates
-citynames=['Yellowknife','Pendleton','Rennes St-Jacques','Berlin','Adelaide','North Platte']
-statlats=[62.4540,45.6721,48.0698,52.5200,-34.9285,41.1403] #in degrees N
-statlons=[-114.3718,-118.7886,-1.7344,13.4050,138.6007,-100.7601] #in degrees E
+# Locations and associated coordinates
+citynames = ['Yellowknife', 'Pendleton', 'Rennes St-Jacques', 'Berlin', 'Adelaide', 'North Platte']
+statlats = [62.4540, 45.6721, 48.0698, 52.5200, -34.9285, 41.1403]  # in degrees N
+statlons = [-114.3718, -118.7886, -1.7344, 13.4050, 138.6007, -100.7601]  # in degrees E
-### Set season, binwidth, and percentile threshold for Gaussian fit calculations
-yearbeg=int(os.environ["FIRSTYR"])
-yearend=int(os.environ["LASTYR"])
-monthsub=[[1,2,12],[1,2,12],[6,7,8],[1,2,12],[1,2,12],[1,2,12]]
-monthstr=['DJF','DJF','JJA','DJF','DJF','DJF']
-binwidth=1 #to compute temperature anomaly histogram for fitting Gaussian distribution
-ptile=int(os.environ["ptile"])
+# Set season, binwidth, and percentile threshold for Gaussian fit calculations
+yearbeg = int(os.environ["startdate"])
+yearend = int(os.environ["enddate"])
+monthsub = [[1, 2, 12], [1, 2, 12], [6, 7, 8], [1, 2, 12], [1, 2, 12], [1, 2, 12]]
+monthstr = ['DJF', 'DJF', 'JJA', 'DJF', 'DJF', 'DJF']
+binwidth = 1  # to compute temperature anomaly histogram for fitting Gaussian distribution
+ptile = int(os.environ["ptile"])
-### Figure subplots - depends on number of locations specified
-plotrows=3
-plotcols=2
+# Figure subplots - depends on number of locations specified
+plotrows = 3
+plotcols = 2
-### Save figure to filename/directory
-FIG_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/PS"
-FIG_OUTPUT_FILENAME='FreqDists.ps'
+# Save figure to filename/directory
+FIG_OUTPUT_DIR = os.environ["WORK_DIR"] + "/model/PS"
+FIG_OUTPUT_FILENAME = 'FreqDists.ps'
-### Reanalysis output figure for comparisons
-FIG_OBS_DIR=os.environ["WK_DIR"]+"/obs/PS"
-FIG_OBS_FILENAME="MERRA2_198001-200912_res=0.5-0.66.FreqDists.png"
+# Reanalysis output figure for comparisons
+FIG_OBS_DIR = os.environ["WORK_DIR"] + "/obs/PS"
+FIG_OBS_FILENAME = "MERRA2_198001-200912_res=0.5-0.66.FreqDists.png"
# ======================================================================
# END USER SPECIFIED SECTION
@@ -57,56 +57,55 @@
# ======================================================================
# DO NOT MODIFY CODE BELOW
# ======================================================================
-data={}
+data = {}
-data["MODEL"]=MODEL
-data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR
-data["FIG_OUTPUT_DIR"]=FIG_OUTPUT_DIR
-data["FIG_OUTPUT_FILENAME"]=FIG_OUTPUT_FILENAME
-data["FIG_OBS_DIR"]=FIG_OBS_DIR
-data["FIG_OBS_FILENAME"]=FIG_OBS_FILENAME
+data["MODEL"] = MODEL
+data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR
+data["FIG_OUTPUT_DIR"] = FIG_OUTPUT_DIR
+data["FIG_OUTPUT_FILENAME"] = FIG_OUTPUT_FILENAME
+data["FIG_OBS_DIR"] = FIG_OBS_DIR
+data["FIG_OBS_FILENAME"] = FIG_OBS_FILENAME
-data["TIME_VAR"]=TIME_VAR
-data["LAT_VAR"]=LAT_VAR
-data["LON_VAR"]=LON_VAR
-data["T2M_VAR"]=T2M_VAR
+data["TIME_VAR"] = TIME_VAR
+data["LAT_VAR"] = LAT_VAR
+data["LON_VAR"] = LON_VAR
+data["T2M_VAR"] = T2M_VAR
-data["yearbeg"]=yearbeg
-data["yearend"]=yearend
-data["ptile"]=ptile
-data["monthsub"]=monthsub
-data["monthstr"]=monthstr
-data["statlats"]=statlats
-data["statlons"]=statlons
-data["citynames"]=citynames
-data["binwidth"]=binwidth
-data["plotrows"]=plotrows
-data["plotcols"]=plotcols
+data["yearbeg"] = yearbeg
+data["yearend"] = yearend
+data["ptile"] = ptile
+data["monthsub"] = monthsub
+data["monthstr"] = monthstr
+data["statlats"] = statlats
+data["statlons"] = statlons
+data["citynames"] = citynames
+data["binwidth"] = binwidth
+data["plotrows"] = plotrows
+data["plotcols"] = plotcols
# Taking care of function arguments for calculating/plotting Gaussian fit
-data["args1"]=[
-monthsub, \
-monthstr, \
-statlats, \
-statlons, \
-citynames, \
-binwidth, \
-plotrows, \
-plotcols, \
-yearbeg, \
-yearend, \
-ptile, \
-MODEL_OUTPUT_DIR, \
-MODEL, \
-FIG_OUTPUT_FILENAME, \
-FIG_OUTPUT_DIR, \
-FIG_OBS_DIR, \
-FIG_OBS_FILENAME, \
-TIME_VAR, \
-T2M_VAR, \
-LAT_VAR, \
-LON_VAR ]
+data["args1"] = [
+    monthsub,
+    monthstr,
+    statlats,
+    statlons,
+    citynames,
+    binwidth,
+    plotrows,
+    plotcols,
+    yearbeg,
+    yearend,
+    ptile,
+    MODEL_OUTPUT_DIR,
+    MODEL,
+    FIG_OUTPUT_FILENAME,
+    FIG_OUTPUT_DIR,
+    FIG_OBS_DIR,
+    FIG_OBS_FILENAME,
+    TIME_VAR,
+    T2M_VAR,
+    LAT_VAR,
+    LON_VAR]
-with open(os.environ["WK_DIR"]+"/TempExtDistShape_FreqDist_parameters.json", "w") as outfile:
+with open(os.environ["WORK_DIR"] + "/TempExtDistShape_FreqDist_parameters.json", "w") as outfile:
    json.dump(data, outfile)
-
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_util.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_util.py
index 5b2f269be..17af724c3 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_util.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_FreqDist_util.py
@@ -18,9 +18,10 @@
# Import standard Python packages
import os
import numpy
-from netCDF4 import Dataset,num2date
+from netCDF4 import Dataset, num2date
import cftime
import matplotlib
+
matplotlib.use('Agg')
import matplotlib.pyplot as mplt
import math
@@ -29,157 +30,169 @@
from scipy import signal
from scipy.optimize import curve_fit
+
# ======================================================================
-### Seasonal_Anomalies
-### Read in two-meter temperature variable from netcdf file and compute seasonal anomalies
+# Seasonal_Anomalies
+# Read in two-meter temperature variable from netcdf file and compute seasonal anomalies
# ----- model_netcdf_filename is string name of directory location of netcdf file to be opened
# ----- lon_var,lat_var,field_var,time_var are string names of longitude, latitude, variable, and time in netcdf file
# ----- monthsub is array of months (integers) for seasonal analysis
# ----- yearbeg and yearend of range of years for analysis
# ----- statind is city index from loop calling function for each specified city
# --------- Output is two-meter temperature seasonal anomalies, longitude, and latitude arrays
-def Seasonal_Anomalies(model_netcdf_filename,lon_var,lat_var,field_var,time_var,monthsub,yearbeg,yearend,statind):
-    var_netcdf=Dataset(model_netcdf_filename,"r")
-    lon=numpy.asarray(var_netcdf.variables[lon_var][:],dtype="float")
-    lat=numpy.asarray(var_netcdf.variables[lat_var][:],dtype="float")
-    var_data=numpy.asarray(var_netcdf.variables[field_var][:],dtype="float") #time, lat, lon
-    datatime=numpy.asarray(var_netcdf.variables[time_var][:],dtype="float")
-    timeunits=var_netcdf.variables[time_var].units
-    varunits=var_netcdf.variables[field_var].units
-    caltype=var_netcdf.variables[time_var].calendar
+def Seasonal_Anomalies(model_netcdf_filename, lon_var, lat_var, field_var, time_var, monthsub, yearbeg, yearend,
+                       statind):
+    var_netcdf = Dataset(model_netcdf_filename, "r")
+    lon = numpy.asarray(var_netcdf.variables[lon_var][:], dtype="float")
+    lat = numpy.asarray(var_netcdf.variables[lat_var][:], dtype="float")
+    var_data = numpy.asarray(var_netcdf.variables[field_var][:], dtype="float")  # time, lat, lon
+    datatime = numpy.asarray(var_netcdf.variables[time_var][:], dtype="float")
+    timeunits = var_netcdf.variables[time_var].units
+    varunits = var_netcdf.variables[field_var].units
+    caltype = var_netcdf.variables[time_var].calendar
    var_netcdf.close()
-    ### Fix longitudes so values range from -180 to 180
-    if lon[lon>180].size>0:
-        lon[lon>180]=lon[lon>180]-360
+    # Fix longitudes so values range from -180 to 180
+    if lon[lon > 180].size > 0:
+        lon[lon > 180] = lon[lon > 180] - 360
-    ### Reshape data to [lon, lat, time] dimensions for code to run properly
+    # Reshape data to [lon, lat, time] dimensions for code to run properly
    if len(var_data.shape) == 4:
-        var_data=numpy.squeeze(var_data)
-    if var_data.shape == (len(lon),len(lat),len(datatime)):
-        var_data=var_data
-    elif var_data.shape == (len(lat),len(datatime),len(lon)):
-        var_data=numpy.transpose(var_data,(2,0,1))
-    elif var_data.shape == (len(lon),len(datatime),len(lat)):
-        var_data=numpy.transpose(var_data,(0,2,1))
-    elif var_data.shape == (len(datatime),len(lon),len(lat)):
-        var_data=numpy.transpose(var_data,(1,2,0))
-    elif var_data.shape == (len(lat),len(lon),len(datatime)):
-        var_data=numpy.transpose(var_data,(1,0,2))
-    elif var_data.shape == (len(datatime),len(lat),len(lon)):
-        var_data=numpy.transpose(var_data,(2,1,0))
-
-    ### Subset temperature to time range specified by "yearbeg,yearend" values
-    datecf=[cftime.num2date(t,units=timeunits,calendar=caltype) for t in datatime]
-    date= numpy.array([T.strftime('%Y-%m-%d') for T in list(datecf)]) #this converts the arrays of timesteps output above to a more readable string format
-    mo=numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)])
-    yr=numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)])
    leapstr = numpy.array(['{0.month:2d}-{0.day:2d}'.format(t) for t in list(datecf)])
-    yearind = numpy.where(numpy.logical_and(yr>=yearbeg, yr<=yearend))[0]
-    var_data=var_data[:,:,yearind]
-    leapstr=leapstr[yearind]
-    mo=mo[yearind]
-    yr=yr[yearind]
-
-    ### Subset temperature to season specified by "monthsub" vector
-    moinds=numpy.in1d(mo,monthsub[statind])
-    moinds=(numpy.where(moinds)[0])
-    moinds=[int(indval) for indval in moinds]
-    leapstr=leapstr[moinds]
-    var_data=var_data[:,:,moinds]
-
-    ### Remove leap days
-    dateind=(leapstr != '02-29')
-    leapstr=leapstr[dateind]
-    var_data=var_data[:,:,dateind]
-
-    ### Compute temperature anomaly
-    days_uniq=numpy.unique(leapstr)
-    var_anom=numpy.empty(var_data.shape)
-    dayinds=[numpy.where(leapstr==dd)[0] for dd in days_uniq]
-    for begval in numpy.arange(0,len(dayinds)):
-        temp_clim=numpy.mean(var_data[:,:,dayinds[begval]],axis=2)
-        temp_clim=temp_clim.reshape(temp_clim.shape[0],temp_clim.shape[1], 1)
-        var_anom[:,:,dayinds[begval]]=var_data[:,:,dayinds[begval]]-temp_clim
-    return var_anom,lon,lat
+        var_data = numpy.squeeze(var_data)
+    if var_data.shape == (len(lon), len(lat), len(datatime)):
+        var_data = var_data
+    elif var_data.shape == (len(lat), len(datatime), len(lon)):
+        var_data = numpy.transpose(var_data, (2, 0, 1))
+    elif var_data.shape == (len(lon), len(datatime), len(lat)):
+        var_data = numpy.transpose(var_data, (0, 2, 1))
+    elif var_data.shape == (len(datatime), len(lon), len(lat)):
+        var_data = numpy.transpose(var_data, (1, 2, 0))
+    elif var_data.shape == (len(lat), len(lon), len(datatime)):
+        var_data = numpy.transpose(var_data, (1, 0, 2))
+    elif var_data.shape == (len(datatime), len(lat), len(lon)):
+        var_data = numpy.transpose(var_data, (2, 1, 0))
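+    # For example, an input read in as (time, lat, lon) matches the final branch
+    # above and is transposed with axes (2, 1, 0) to the expected (lon, lat, time).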
+
+    # Subset temperature to time range specified by "yearbeg,yearend" values
+    datecf = [cftime.num2date(t, units=timeunits, calendar=caltype) for t in datatime]
+    mo = numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)])
+    yr = numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)])
    leapstr = numpy.array(['{0.month:2d}-{0.day:2d}'.format(t) for t in list(datecf)])
+    yearind = numpy.where(numpy.logical_and(yr >= yearbeg, yr <= yearend))[0]
+    var_data = var_data[:, :, yearind]
+    leapstr = leapstr[yearind]
+    mo = mo[yearind]
+
+    # Subset temperature to season specified by "monthsub" vector
+    moinds = numpy.in1d(mo, monthsub[statind])
+    moinds = (numpy.where(moinds)[0])
+    moinds = [int(indval) for indval in moinds]
+    leapstr = leapstr[moinds]
+    var_data = var_data[:, :, moinds]
+
+    # Remove leap days
+    dateind = (leapstr != '02-29')
+    leapstr = leapstr[dateind]
+    var_data = var_data[:, :, dateind]
+
+    # Compute temperature anomaly
+    days_uniq = numpy.unique(leapstr)
+    var_anom = numpy.empty(var_data.shape)
+    dayinds = [numpy.where(leapstr == dd)[0] for dd in days_uniq]
+    for begval in numpy.arange(0, len(dayinds)):
+        temp_clim = numpy.mean(var_data[:, :, dayinds[begval]], axis=2)
+        temp_clim = temp_clim.reshape(temp_clim.shape[0], temp_clim.shape[1], 1)
+        var_anom[:, :, dayinds[begval]] = var_data[:, :, dayinds[begval]] - temp_clim
+    return var_anom, lon, lat
+
# ======================================================================
-### Gaussfit_Params
-### Function to fit Gaussian distribution using two degree polynomial
+# Gaussfit_Params
+# Function to fit Gaussian distribution using two degree polynomial
# ----- y=A*exp(-(x-mu)**2/(2*sigma**2)) is Gaussian fit equation
# ----- x is the array of bin centers from the histogram
# ----- y is the array of bin counts from the histogram
# ----- Threshold h is the fraction from the maximum y height of the data (0 < h < 1)
# --------- Output is parameters sigma, mu, and A from Gaussian fit equation
-def Gaussfit_Params(x,y,h):
-    ymax=max(y)
-    xnew=[]
-    ynew=[]
-    for i in numpy.arange(0,len(x)):
-        if y[i]>ymax*h:
+def Gaussfit_Params(x, y, h):
+    ymax = max(y)
+    xnew = []
+    ynew = []
+    for i in numpy.arange(0, len(x)):
+        if y[i] > ymax * h:
            xnew.append(x[i])
            ynew.append(y[i])
-    ylog=[math.log(yval) for yval in ynew]
-    A2,A1,A0=numpy.polyfit(xnew,ylog,2)
-    sigma=math.sqrt(numpy.true_divide(-1,(2*A2)))
-    mu=A1*sigma**2;
-    A=math.exp(A0+numpy.true_divide(mu**2,(2*sigma**2)));
-    return sigma,mu,A
+    ylog = [math.log(yval) for yval in ynew]
+    A2, A1, A0 = numpy.polyfit(xnew, ylog, 2)
+    sigma = math.sqrt(numpy.true_divide(-1, (2 * A2)))
+    mu = A1 * sigma ** 2
+    A = math.exp(A0 + numpy.true_divide(mu ** 2, (2 * sigma ** 2)))
+    return sigma, mu, A
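+# Why the polyfit above recovers the Gaussian parameters: taking the log of
+# y = A*exp(-(x-mu)**2/(2*sigma**2)) gives a quadratic in x, log(y) = A2*x**2 + A1*x + A0,
+# whose coefficients satisfy
+#   A2 = -1/(2*sigma**2)             ->  sigma = sqrt(-1/(2*A2))
+#   A1 = mu/sigma**2                 ->  mu = A1*sigma**2
+#   A0 = log(A) - mu**2/(2*sigma**2) ->  A = exp(A0 + mu**2/(2*sigma**2))
+# which is exactly the conversion performed after numpy.polyfit.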
+
# ======================================================================
-### Gaussfit_Est
-### Estimates the Gaussian fit to the histogram of the distribution of two-meter temperature anomalies at specified station/season
+# Gaussfit_Est
+# Estimates the Gaussian fit to the histogram of the distribution of two-meter temperature anomalies at
+# specified station/season
# ----- T2Manom_data is 2-meter temperature anomaly data output from Seasonal_Anomalies function above
# ----- lat and lon are latitude and longitude arrays output from Seasonal_Anomalies function above
# ----- statind is city index from loop calling function for each specified location
# ----- statlats, statlons, and citynames are arrays of city name strings and associated coordinates
# ----- binwidth is for histogram binning of temperature anomalies at each city
-# --------- Output is centers and counts of histogram bins of temperature anomaly data, fixed bin centers if gaussian fit is too wide, gaussian fit, and detrended temperature anomaly array for location
-def Gaussfit_Est(T2Manom_data,lat,lon,statind,statlats,statlons,citynames,binwidth):
-    print(" Estimating Gaussian fit at "+citynames[statind]+"...")
-    Tanom_data=signal.detrend(T2Manom_data,axis=2,type='linear')
-
-    ### Determine grid cell closest to chosen location
-    statlatind=numpy.argmin(numpy.abs(lat-statlats[statind]))
-    statlonind=numpy.argmin(numpy.abs(lon-statlons[statind]))
-
-    ### Subset temperature anomaly to this grid cell to compute tail days based on percentile threshold
-    Tanom_stat=numpy.squeeze(Tanom_data[statlonind,statlatind,:])
-
-    ### Compute histogram - numpy.histogram outputs bin edges but require bin centers so define edges here accordingly to match count between hist and bin_edges
-    xx,bin_centers=numpy.histogram(Tanom_stat,bins=numpy.arange(min(Tanom_stat)+numpy.true_divide(binwidth,2),max(Tanom_stat)+numpy.true_divide(binwidth,2),binwidth))
-    bin_counts,xx=numpy.histogram(Tanom_stat,bins=numpy.arange(min(Tanom_stat),max(Tanom_stat)+binwidth,binwidth))
-
-    ### Compute Gaussian fit parameters using polynomial
+# --------- Output is centers and counts of histogram bins of temperature anomaly data,
+# fixed bin centers if gaussian fit is too wide, gaussian fit, and detrended temperature anomaly array for location
+def Gaussfit_Est(T2Manom_data, lat, lon, statind, statlats, statlons, citynames, binwidth):
+    print(" Estimating Gaussian fit at " + citynames[statind] + "...")
+    Tanom_data = signal.detrend(T2Manom_data, axis=2, type='linear')
+
+    # Determine grid cell closest to chosen location
+    statlatind = numpy.argmin(numpy.abs(lat - statlats[statind]))
+    statlonind = numpy.argmin(numpy.abs(lon - statlons[statind]))
+
+    # Subset temperature anomaly to this grid cell to compute tail days based on percentile threshold
+    Tanom_stat = numpy.squeeze(Tanom_data[statlonind, statlatind, :])
+
+    # Compute histogram - numpy.histogram outputs bin edges but require bin centers so define edges here
+    # accordingly to match count between hist and bin_edges
+    xx, bin_centers = numpy.histogram(Tanom_stat, bins=numpy.arange(min(Tanom_stat) + numpy.true_divide(binwidth, 2),
+                                                                    max(Tanom_stat) + numpy.true_divide(binwidth, 2),
+                                                                    binwidth))
+    bin_counts, xx = numpy.histogram(Tanom_stat,
+                                     bins=numpy.arange(min(Tanom_stat), max(Tanom_stat) + binwidth, binwidth))
+
+    # Compute Gaussian fit parameters using polynomial
# ----- Normalize counts, and fit to core using 0.3 of the maximum value
# ----- See Ruff and Neelin (2012) and Loikith and Neelin (2015)
-    bin_counts=numpy.true_divide(bin_counts,max(bin_counts))
-    sigma,mu,A=Gaussfit_Params(bin_centers,bin_counts,0.3)
+    bin_counts = numpy.true_divide(bin_counts, max(bin_counts))
+    sigma, mu, A = Gaussfit_Params(bin_centers, bin_counts, 0.3)
-    ### If the standard deviation of the fit to the core is greater than the SD of the entire distribution, use SD of entire distribution
+    # If the standard deviation of the fit to the core is greater than the SD of the entire distribution,
+    # use SD of entire distribution
    if sigma > numpy.std(Tanom_stat):
-        sigma=numpy.std(Tanom_stat)
-
-    ### Determine Gaussian fit using equation and parameters from gaussfit_params, and bin centers from histogram
-    gauss_fit=[A*math.exp(numpy.true_divide(-(x-mu)**2,2*sigma**2)) for x in bin_centers]
-
-    ### Gaussian fit often does not extend to zero. Extend bin_centers and recompute fit to facilitate plotting
-    if gauss_fit[-1]pthresh]
-    pthresh_bincounts=bin_counts[bin_centers>pthresh]
+    pthresh_bincenters = bin_centers[bin_centers > pthresh]
+    pthresh_bincounts = bin_counts[bin_centers > pthresh]
    elif ptile > 50:
-    pthresh_bincenters=bin_centers[bin_centers180].size>0:
-        lon[lon>180]=lon[lon>180]-360
-    LAT,LON=numpy.meshgrid(lat,lon,sparse=False,indexing="xy")
-    LAT=numpy.reshape(LAT,(-1,1))
-    LON=numpy.reshape(LON,(-1,1))
-    LATLON=numpy.squeeze(numpy.array((LAT,LON)))
-    LATLON=LATLON.transpose()
-    mask_region=numpy.zeros(LAT.size)
    for latlon_idx in numpy.arange(mask_region.shape[0]):
-        mask_region[latlon_idx]=regMaskInterpolator(LATLON[latlon_idx,:])
-    mask_region=numpy.reshape(mask_region,(-1,lat.size))
-    mask_region[mask_region!=1]=numpy.nan
+    # Fix longitudes so values range from -180 to 180
+    if lon[lon > 180].size > 0:
+        lon[lon > 180] = lon[lon > 180] - 360
+    LAT, LON = numpy.meshgrid(lat, lon, sparse=False, indexing="xy")
+    LAT = numpy.reshape(LAT, (-1, 1))
+    LON = numpy.reshape(LON, (-1, 1))
+    LATLON = numpy.squeeze(numpy.array((LAT, LON)))
+    LATLON = LATLON.transpose()
+    mask_region = numpy.zeros(LAT.size)
    for latlon_idx in numpy.arange(mask_region.shape[0]):
+        mask_region[latlon_idx] = regMaskInterpolator(LATLON[latlon_idx, :])
+    mask_region = numpy.reshape(mask_region, (-1, lat.size))
+    mask_region[mask_region != 1] = numpy.nan
    print("...Generated!")
    return mask_region
+
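+# A sketch of what the interpolator does at each model grid point: the call
+# regMaskInterpolator(LATLON[latlon_idx, :]) looks up the nearest MERRA-2 cell
+# in (lat, lon) space and returns its mask value; anything other than 1 (land)
+# is then set to NaN so non-land points drop out of the moment calculations.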
# ======================================================================
-### Seasonal_Moments
-### Read in two-meter temperature variable from netcdf file and compute seasonal subset
+# Seasonal_Moments
+# Read in two-meter temperature variable from netcdf file and compute seasonal subset
# ----- model_netcdf_filename is string name of directory location of netcdf file to be opened
# ----- lon_var,lat_var,field_var,time_var are string names of longitude, latitude, variable, and time in netcdf file
# ----- monthsub is array of months (integers) for seasonal analysis
# ----- yearbeg and yearend of range of years for analysis
# --------- Output is two-meter temperature seasonal data arrays, and longitude and latitude arrays
-def Seasonal_Moments(model_netcdf_filename,lon_var,lat_var,field_var,time_var,monthsub,yearbeg,yearend,mask_region):
+def Seasonal_Moments(model_netcdf_filename, lon_var, lat_var, field_var, time_var, monthsub, yearbeg, yearend,
+                     mask_region):
    print(" Computing seasonal temperature moments...")
-    var_netcdf=Dataset(model_netcdf_filename,"r")
-    lat=numpy.asarray(var_netcdf.variables[lat_var][:],dtype="float")
-    lon=numpy.asarray(var_netcdf.variables[lon_var][:],dtype="float")
-    var_data=numpy.asarray(var_netcdf.variables[field_var][:],dtype="float") #time, lat, lon
-    datatime=numpy.asarray(var_netcdf.variables[time_var][:],dtype="float")
-    timeunits=var_netcdf.variables[time_var].units
-    varunits=var_netcdf.variables[field_var].units
-    caltype=var_netcdf.variables[time_var].calendar
+    var_netcdf = Dataset(model_netcdf_filename, "r")
+    lat = numpy.asarray(var_netcdf.variables[lat_var][:], dtype="float")
+    lon = numpy.asarray(var_netcdf.variables[lon_var][:], dtype="float")
+    var_data = numpy.asarray(var_netcdf.variables[field_var][:], dtype="float")  # time, lat, lon
+    datatime = numpy.asarray(var_netcdf.variables[time_var][:], dtype="float")
+    timeunits = var_netcdf.variables[time_var].units
+    varunits = var_netcdf.variables[field_var].units
+    caltype = var_netcdf.variables[time_var].calendar
    var_netcdf.close()
-    ### Fix longitudes so values range from -180 to 180
-    if lon[lon>180].size>0:
-        lon[lon>180]=lon[lon>180]-360
+    # Fix longitudes so values range from -180 to 180
+    if lon[lon > 180].size > 0:
+        lon[lon > 180] = lon[lon > 180] - 360
-    ### Reshape data to [lon, lat, time] dimensions for code to run properly
+    # Reshape data to [lon, lat, time] dimensions for code to run properly
    if len(var_data.shape) == 4:
-        var_data=numpy.squeeze(var_data)
-    if var_data.shape == (len(lon),len(lat),len(datatime)):
-        var_data=var_data
-    elif var_data.shape == (len(lat),len(datatime),len(lon)):
-        var_data=numpy.transpose(var_data,(2,0,1))
-    elif var_data.shape == (len(lon),len(datatime),len(lat)):
-        var_data=numpy.transpose(var_data,(0,2,1))
-    elif var_data.shape == (len(datatime),len(lon),len(lat)):
-        var_data=numpy.transpose(var_data,(1,2,0))
-    elif var_data.shape == (len(lat),len(lon),len(datatime)):
-        var_data=numpy.transpose(var_data,(1,0,2))
-    elif var_data.shape == (len(datatime),len(lat),len(lon)):
-        var_data=numpy.transpose(var_data,(2,1,0))
+        var_data = numpy.squeeze(var_data)
+    if var_data.shape == (len(lon), len(lat), len(datatime)):
+        var_data = var_data
+    elif var_data.shape == (len(lat), len(datatime), len(lon)):
+        var_data = numpy.transpose(var_data, (2, 0, 1))
+    elif var_data.shape == (len(lon), len(datatime), len(lat)):
+        var_data = numpy.transpose(var_data, (0, 2, 1))
+    elif var_data.shape == (len(datatime), len(lon), len(lat)):
+        var_data = numpy.transpose(var_data, (1, 2, 0))
+    elif var_data.shape == (len(lat), len(lon), len(datatime)):
+        var_data = numpy.transpose(var_data, (1, 0, 2))
+    elif var_data.shape == (len(datatime), len(lat), len(lon)):
+        var_data = numpy.transpose(var_data, (2, 1, 0))
-    ### Subset temperature to time range specified by "yearbeg,yearend" values
-    datecf=[cftime.num2date(t,units=timeunits,calendar=caltype) for t in datatime]
-    date= numpy.array([T.strftime('%Y-%m-%d') for T in list(datecf)]) #this converts the arrays of timesteps output above to a more readable string format
-    mo=numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)])
-    yr=numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)])
+    # Subset temperature to time range specified by "yearbeg,yearend" values
+    datecf = [cftime.num2date(t, units=timeunits, calendar=caltype) for t in datatime]
+    mo = numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)])
+    yr = numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)])
    leapstr = numpy.array(['{0.month:2d}-{0.day:2d}'.format(t) for t in list(datecf)])
-    yearind = numpy.where(numpy.logical_and(yr>=yearbeg, yr<=yearend))[0]
-    var_data=var_data[:,:,yearind]
-    leapstr=leapstr[yearind]
-    mo=mo[yearind]
-    yr=yr[yearind]
+    yearind = numpy.where(numpy.logical_and(yr >= yearbeg, yr <= yearend))[0]
+    var_data = var_data[:, :, yearind]
+    leapstr = leapstr[yearind]
+    mo = mo[yearind]
-    ### Subset temperature to season specified by "monthsub" vector
-    moinds=numpy.in1d(mo,monthsub)
-    moinds=(numpy.where(moinds)[0])
-    moinds=[int(indval) for indval in moinds]
-    leapstr=leapstr[moinds]
-    var_data=var_data[:,:,moinds]
+    # Subset temperature to season specified by "monthsub" vector
+    moinds = numpy.in1d(mo, monthsub)
+    moinds = (numpy.where(moinds)[0])
+    moinds = [int(indval) for indval in moinds]
+    leapstr = leapstr[moinds]
+    var_data = var_data[:, :, moinds]
-    ### Remove leap days
-    dateind=(leapstr != '02-29')
-    leapstr=leapstr[dateind]
-    var_data=var_data[:,:,dateind]
+    # Remove leap days
+    dateind = (leapstr != '02-29')
+    var_data = var_data[:, :, dateind]
    if varunits == 'K' or varunits == 'Kelvin':
-        var_data=var_data-273
+        var_data = var_data - 273
-    ### Mask temperature array by cloning 2D lon-lat mask array output from Mask_Region function to size of temperature array in time dimension
-    masked_T2M = var_data*(numpy.repeat(mask_region[:,:,numpy.newaxis], var_data.shape[2], axis=2))
-    seas_mean=numpy.transpose(numpy.nanmean(masked_T2M,2))
-    seas_std=numpy.transpose(numpy.nanstd(masked_T2M,axis=2,ddof=1))
-    seas_skew=numpy.transpose(scipy.stats.skew(masked_T2M,axis=2))
+    # Mask temperature array by cloning 2D lon-lat mask array output from Mask_Region function to size of
+    # temperature array in time dimension
+    masked_T2M = var_data * (numpy.repeat(mask_region[:, :, numpy.newaxis], var_data.shape[2], axis=2))
+    seas_mean = numpy.transpose(numpy.nanmean(masked_T2M, 2))
+    seas_std = numpy.transpose(numpy.nanstd(masked_T2M, axis=2, ddof=1))
+    seas_skew = numpy.transpose(scipy.stats.skew(masked_T2M, axis=2))
    print("...Computed!")
-    return seas_mean,seas_std,seas_skew,lon,lat
+    return seas_mean, seas_std, seas_skew, lon, lat
+
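+# Note on the masking step above: numpy.repeat clones the 2D (lon, lat) land mask
+# along a new third axis so it multiplies the (lon, lat, time) field elementwise;
+# the NaNs it carries then flow through the nanmean/nanstd and skew calls, so only
+# land points contribute to the seasonal moments.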
# ======================================================================
-### Moments_Plot
-### Plot mathematical moments of temperature distribution
+# Moments_Plot
+# Plot mathematical moments of temperature distribution
# ----- model_netcdf_filename is data filename and lon_var is longitude string to read in longitude array
# ----- lat is latitude array output from Seasonal_Moments function above
# ----- monthstr is string referring to months of seasonal analysis
@@ -158,75 +163,79 @@ def Seasonal_Moments(model_netcdf_filename,lon_var,lat_var,field_var,time_var,mo
# ----- titles and data are arrays of moments being plotted, computed from Seasonal_Moments function above
# ----- tickrange and var_units are plotting parameters specified in usp.py file
# ----- fig_dir and fig_name are location to save figure output
-def Moments_Plot(model_netcdf_filename,lon_var,lat,monthstr,cmaps,titles,data,tickrange,var_units,fig_dir,fig_name):
+def Moments_Plot(model_netcdf_filename, lon_var, lat, monthstr, cmaps, titles, data, tickrange, var_units, fig_dir,
+                 fig_name):
    print(" Plotting seasonal temperature moments...")
-    fig=mplt.figure(figsize=(11,13))
+    fig = mplt.figure(figsize=(11, 13))
-    ### Align latitudes with land borders
-    lat=lat - numpy.true_divide((lat[2]-lat[1]),2)
-    for idata in numpy.arange(0,len(cmaps)):
-        ax=fig.add_subplot(int('31'+str(idata+1)),projection=cartopy.crs.PlateCarree())
-        ax.set_extent([-180,180,-60,90], crs=ax.projection)
+    # Align latitudes with land borders
+    lat = lat - numpy.true_divide((lat[2] - lat[1]), 2)
+    for idata in numpy.arange(0, len(cmaps)):
+        ax = fig.add_subplot(int('31' + str(idata + 1)), projection=cartopy.crs.PlateCarree())
+        ax.set_extent([-180, 180, -60, 90], crs=ax.projection)
-        ### Read in longitude directly from model and use shiftdata function to avoid wrapping while plotting
-        var_netcdf=Dataset(model_netcdf_filename,"r")
-        lon=numpy.asarray(var_netcdf.variables[lon_var][:],dtype="float")
-        if lon[lon>180].size>0: #0 to 360 grid
-            data_plt,lon = shiftgrid(180.,data[idata],lon,start=False)
        else:
-            data_plt=data[idata]
-            lon=lon - numpy.true_divide((lon[2]-lon[1]),2)
+        # Read in longitude directly from model and use shiftdata function to avoid wrapping while plotting
+        var_netcdf = Dataset(model_netcdf_filename, "r")
+        lon = numpy.asarray(var_netcdf.variables[lon_var][:], dtype="float")
+        if lon[lon > 180].size > 0:  # 0 to 360 grid
+            data_plt, lon = shiftgrid(180., data[idata], lon, start=False)
        else:
+            data_plt = data[idata]
+            lon = lon - numpy.true_divide((lon[2] - lon[1]), 2)
-        p1=ax.pcolormesh(lon,lat,data_plt,cmap=cmaps[idata],vmin=numpy.min(tickrange[idata]),vmax=numpy.max(tickrange[idata]),linewidth=0,rasterized=True,transform=ax.projection)
-        ax.add_feature(cartopy.feature.COASTLINE,zorder=10,linewidth=0.7)
-        ax.add_feature(cartopy.feature.LAKES,zorder=11,linewidth=0.7,edgecolor='k',facecolor='none')
-        ax.set_title(titles[idata],fontdict={'fontsize': 15, 'fontweight': 'medium'})
+        p1 = ax.pcolormesh(lon, lat, data_plt, cmap=cmaps[idata], vmin=numpy.min(tickrange[idata]),
+                           vmax=numpy.max(tickrange[idata]), linewidth=0, rasterized=True, transform=ax.projection)
+        ax.add_feature(cartopy.feature.COASTLINE, zorder=10, linewidth=0.7)
+        ax.add_feature(cartopy.feature.LAKES, zorder=11, linewidth=0.7, edgecolor='k', facecolor='none')
+        ax.set_title(titles[idata], fontdict={'fontsize': 15, 'fontweight': 'medium'})
        ax.set_aspect('equal')
-        ### Create individual colorbars per subplot
+        # Create individual colorbars per subplot
        axpos = ax.get_position()
        axpos0 = axpos.x0
        pos_x = axpos0 + axpos.width - 0.05
-        cax = inset_axes(ax,width="7%", height="100%",loc='right',bbox_to_anchor=(pos_x,0,0.3,1),bbox_transform=ax.transAxes,borderpad=0)
+        cax = inset_axes(ax, width="7%", height="100%", loc='right', bbox_to_anchor=(pos_x, 0, 0.3, 1),
+                         bbox_transform=ax.transAxes, borderpad=0)
        if idata != 2:
-            cbar=fig.colorbar(p1,cax=cax,label=var_units,orientation='vertical',ticks=tickrange[idata])
+            cbar = fig.colorbar(p1, cax=cax, label=var_units, orientation='vertical', ticks=tickrange[idata])
        else:
-            cbar=fig.colorbar(p1,cax=cax,orientation='vertical',ticks=tickrange[idata])
+            cbar = fig.colorbar(p1, cax=cax, orientation='vertical', ticks=tickrange[idata])
        cbar.set_ticklabels(tickrange[idata])
        cbar.ax.tick_params(labelsize=14)
-        ax.text(0.02, 0.02, monthstr,fontsize=14,transform=ax.transAxes,weight='bold')
    fig.canvas.draw()
-    fig.savefig(fig_dir+'/'+fig_name, bbox_inches="tight")
    print("...Completed!")
-    print(" Figure saved as "+fig_dir+'/'+fig_name+"!")
+        ax.text(0.02, 0.02, monthstr, fontsize=14, transform=ax.transAxes, weight='bold')
    fig.canvas.draw()
+    fig.savefig(fig_dir + '/' + fig_name, bbox_inches="tight")
    print("...Completed!")
+    print(" Figure saved as " + fig_dir + '/' + fig_name + "!")
+
# ======================================================================
-### shiftgrid
-### Shift global lat/lon grid east or west. Taken from Python 2 Basemap function
-def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
-    #.. tabularcolumns:: |l|L|
-    #============== ====================================================
-    #Arguments Description
-    #============== ====================================================
-    #lon0 starting longitude for shifted grid
    # (ending longitude if start=False). lon0 must be on
    # input grid (within the range of lonsin).
-    #datain original data with longitude the right-most
+# shiftgrid
+# Shift global lat/lon grid east or west. Taken from Python 2 Basemap function
+def shiftgrid(lon0, datain, lonsin, start=True, cyclic=360.0):
+    # .. tabularcolumns:: |l|L|
+    # ============== ====================================================
+    # Arguments Description
+    # ============== ====================================================
+    # lon0 starting longitude for shifted grid
    # (ending longitude if start=False). lon0 must be on
    # input grid (within the range of lonsin).
-    #datain original data with longitude the right-most
+    # datain original data with longitude the right-most
    # dimension.
-    #lonsin original longitudes.
-    #============== ====================================================
-    #.. tabularcolumns:: |l|L|
-    #============== ====================================================
-    #Keywords Description
-    #============== ====================================================
-    #start if True, lon0 represents the starting longitude
+    # lonsin original longitudes.
+    # ============== ====================================================
+    # .. tabularcolumns:: |l|L|
+    # ============== ====================================================
+    # Keywords Description
+    # ============== ====================================================
+    # start if True, lon0 represents the starting longitude
    # of the new grid. if False, lon0 is the ending
    # longitude. Default True.
-    #cyclic width of periodic domain (default 360)
-    #============== ====================================================
-    #returns ``dataout,lonsout`` (data and longitudes on shifted grid).
+    # cyclic width of periodic domain (default 360)
+    # ============== ====================================================
+    # returns ``dataout,lonsout`` (data and longitudes on shifted grid).
-    if numpy.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
+    if numpy.fabs(lonsin[-1] - lonsin[0] - cyclic) > 1.e-4:
        # Use all data instead of raise ValueError, 'cyclic point not included'
        start_idx = 0
    else:
@@ -234,26 +243,26 @@
        start_idx = 1
    if lon0 < lonsin[0] or lon0 > lonsin[-1]:
        raise ValueError('lon0 outside of range of lonsin')
-    i0 = numpy.argmin(numpy.fabs(lonsin-lon0))
-    i0_shift = len(lonsin)-i0
+    i0 = numpy.argmin(numpy.fabs(lonsin - lon0))
+    i0_shift = len(lonsin) - i0
    if numpy.ma.isMA(datain):
-        dataout = numpy.ma.zeros(datain.shape,datain.dtype)
+        dataout = numpy.ma.zeros(datain.shape, datain.dtype)
    else:
-        dataout = numpy.zeros(datain.shape,datain.dtype)
+        dataout = numpy.zeros(datain.shape, datain.dtype)
    if numpy.ma.isMA(lonsin):
-        lonsout = numpy.ma.zeros(lonsin.shape,lonsin.dtype)
+        lonsout = numpy.ma.zeros(lonsin.shape, lonsin.dtype)
    else:
-        lonsout = numpy.zeros(lonsin.shape,lonsin.dtype)
+        lonsout = numpy.zeros(lonsin.shape, lonsin.dtype)
    if start:
        lonsout[0:i0_shift] = lonsin[i0:]
    else:
-        lonsout[0:i0_shift] = lonsin[i0:]-cyclic
-    dataout[...,0:i0_shift] = datain[...,i0:]
+        lonsout[0:i0_shift] = lonsin[i0:] - cyclic
+    dataout[..., 0:i0_shift] = datain[..., i0:]
    if start:
-        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
+        lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx] + cyclic
    else:
-        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
-    dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
-    return dataout,lonsout
+        lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx]
+    dataout[..., i0_shift:] = datain[..., start_idx:i0 + start_idx]
+    return dataout, lonsout
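+# Usage sketch (mirrors the call in Moments_Plot above): for a 0-360 input grid,
+#   data_plt, lon = shiftgrid(180., data2d, lon, start=False)
+# returns the field and its longitudes rearranged onto a -180..180 frame
+# (data2d here is a placeholder name for any 2D field with longitude last).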
# ======================================================================
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_SeasonAndTail_usp.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_SeasonAndTail_usp.py
index e4b8a92a6..9b53b26d2 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_SeasonAndTail_usp.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_SeasonAndTail_usp.py
@@ -15,15 +15,12 @@
# ======================================================================
# START USER SPECIFIED SECTION
# ======================================================================
-### Set range of years, season, and tail percentile threshold for calculations
-yearbeg=int(os.environ["FIRSTYR"])
-yearend=int(os.environ["LASTYR"])
-monthstr=os.environ["monthstr"]
-monthsub=os.environ["monthsub"]
-ptile=os.environ["ptile"]
-#monthsub=[12,1,2]
-#monthstr='JJA'
-#ptile=5
+# Set range of years, season, and tail percentile threshold for calculations
+yearbeg = int(os.environ["startdate"])
+yearend = int(os.environ["enddate"])
+monthstr = os.environ["monthstr"]
+monthsub = os.environ["monthsub"]
+ptile = os.environ["ptile"]
# ======================================================================
# END USER SPECIFIED SECTION
@@ -34,21 +31,20 @@
# ======================================================================
# DO NOT MODIFY CODE BELOW UNLESS
# YOU KNOW WHAT YOU ARE DOING
# ======================================================================
-data={}
-data["yearbeg"]=yearbeg
-data["yearend"]=yearend
-data["monthsub"]=monthsub
-data["monthstr"]=monthstr
-data["ptile"]=ptile
+data = {}
+data["yearbeg"] = yearbeg
+data["yearend"] = yearend
+data["monthsub"] = monthsub
+data["monthstr"] = monthstr
+data["ptile"] = ptile
# Taking care of function arguments for calculating shift ratio
-data["args1"]=[ \
-yearbeg, \
-yearend, \
-monthsub, \
-monthstr, \
-ptile, \
+data["args1"] = [
+    yearbeg,
+    yearend,
+    monthsub,
+    monthstr,
+    ptile,
]
-with open(os.environ["WK_DIR"]+"TempExtDistShape_SeasonAndTail.json", "w") as outfile:
+with open(os.environ["WORK_DIR"] + "/TempExtDistShape_SeasonAndTail.json", "w") as outfile:
    json.dump(data, outfile)
-
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio.py
index 98f474553..2590a7636 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio.py
@@ -14,7 +14,8 @@
# This file is part of the Surface Temperature Extremes and Distribution Shape Package
# and the MDTF code package. See LICENSE.txt for the license.
#
-# Computes the shifted underlying-to-Gaussian distribution tail exceedances ratio following Loikith and Neelin (2019), Loikith et al. (2018), Loikith and Neelin (2015), Ruff and Neelin (2012)
+# Computes the shifted underlying-to-Gaussian distribution tail exceedances ratio following Loikith and Neelin (2019),
+# Loikith et al. (2018), Loikith and Neelin (2015), Ruff and Neelin (2012)
#
# Generates spatial plot of this shift ratio over season specified as function of two-meter temperature
#
@@ -43,35 +44,42 @@
print("**************************************************")
# ======================================================================
-### Load user-specified parameters (usp) for calcluating and plotting shift ratio
+# Load user-specified parameters (usp) for calculating and plotting shift ratio
print("Load user-specified parameters...")
-os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_ShiftRatio_usp.py")
-with open(os.environ["WK_DIR"]+"/TempExtDistShape_ShiftRatio_parameters.json") as outfile:
-    ratio_data=json.load(outfile)
+os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_ShiftRatio_usp.py")
+with open(os.environ["WORK_DIR"] + "/TempExtDistShape_ShiftRatio_parameters.json") as outfile:
+    ratio_data = json.load(outfile)
print("...Loaded!")
-monthsub=json.loads(ratio_data["monthsub"]) #change unicode string into array of integers
+monthsub = json.loads(ratio_data["monthsub"])  # change unicode string into array of integers
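+# e.g. json.loads("[1, 2, 12]") -> [1, 2, 12], the DJF month list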
# ======================================================================
-### List model filenames for two-meter temperature data
-T2Mfile=sorted(glob.glob(ratio_data["MODEL_OUTPUT_DIR"]+"/"+ratio_data["MODEL"]+"*"+ratio_data["T2M_VAR"]+".day.nc"))[0]
+# List model filenames for two-meter temperature data
+T2Mfile = (
+    sorted(glob.glob(ratio_data["MODEL_OUTPUT_DIR"] + "/" + ratio_data["MODEL"] + "*" + ratio_data["T2M_VAR"] +
+                     ".day.nc")))[0]
# ======================================================================
-### Load & pre-process region mask
+# Load & pre-process region mask
# ---- Generate a map of values corresponding to land regions only
-msk=Region_Mask(ratio_data["REGION_MASK_DIR"]+'/'+ratio_data["REGION_MASK_FILENAME"],T2Mfile,ratio_data["LON_VAR"],ratio_data["LAT_VAR"])
+msk = Region_Mask(ratio_data["REGION_MASK_DIR"] + '/' + ratio_data["REGION_MASK_FILENAME"], T2Mfile,
+                  ratio_data["LON_VAR"], ratio_data["LAT_VAR"])
# ======================================================================
-### Calculate seasonal anomalies for two-meter temperature
-T2Manom_data,lon,lat=Seasonal_Anomalies(T2Mfile,ratio_data["LON_VAR"],ratio_data["LAT_VAR"],ratio_data["T2M_VAR"],ratio_data["TIME_VAR"],monthsub,ratio_data["yearbeg"],ratio_data["yearend"])
+# Calculate seasonal anomalies for two-meter temperature
+T2Manom_data, lon, lat = Seasonal_Anomalies(T2Mfile, ratio_data["LON_VAR"], ratio_data["LAT_VAR"],
+                                            ratio_data["T2M_VAR"], ratio_data["TIME_VAR"], monthsub,
+                                            ratio_data["yearbeg"], ratio_data["yearend"])
# ======================================================================
-### Compute the ratio of Non-Gaussian to Gaussian shifted two-meter temperature distribution tails
-shiftratio=ShiftRatio_Calc(ratio_data["ptile"],ratio_data["shift"],msk,T2Manom_data,lon,lat)
+# Compute the ratio of Non-Gaussian to Gaussian shifted two-meter temperature distribution tails
+shiftratio = ShiftRatio_Calc(ratio_data["ptile"], ratio_data["shift"], msk, T2Manom_data, lon, lat)
# ======================================================================
-### Plot the shift ratio computed above and save the figure in wkdir/temp_extremes_distshape/
-ShiftRatio_Plot(T2Mfile,ratio_data["LON_VAR"],ratio_data["COLORMAP_DIR"]+'/'+ratio_data["COLORMAP_FILENAME"],lat,shiftratio,ratio_data["monthstr"],ratio_data["ptile"],ratio_data["FIG_OUTPUT_DIR"],ratio_data["FIG_OUTPUT_FILENAME"])
+# Plot the shift ratio computed above and save the figure in wkdir/temp_extremes_distshape/
+ShiftRatio_Plot(T2Mfile, ratio_data["LON_VAR"], ratio_data["COLORMAP_DIR"] + '/' + ratio_data["COLORMAP_FILENAME"], lat,
+                shiftratio, ratio_data["monthstr"], ratio_data["ptile"], ratio_data["FIG_OUTPUT_DIR"],
+                ratio_data["FIG_OUTPUT_FILENAME"])
# ======================================================================
diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_usp.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_usp.py
index 6f025b6f0..1a1f501ee 100644
--- a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_usp.py
+++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_usp.py
@@ -14,41 +14,41 @@
# ======================================================================
# START USER SPECIFIED SECTION
# ======================================================================
-### Model name and output directory
-MODEL=os.environ["CASENAME"]
-MODEL_OUTPUT_DIR=os.environ["DATADIR"]+"/day"
-
-### Variable Names
-T2M_VAR=os.environ["tas_var"]
-TIME_VAR=os.environ["time_coord"]
-LAT_VAR=os.environ["lat_coord"]
-LON_VAR=os.environ["lon_coord"]
-
-### Set range of years, season, and tail percentile threshold for calculations
-yearbeg=int(os.environ["FIRSTYR"])
-yearend=int(os.environ["LASTYR"])
-monthstr=os.environ["monthstr"]
-monthsub=os.environ["monthsub"]
-ptile=int(os.environ["ptile"])
-
-### Region mask directory & filename
-REGION_MASK_DIR=os.environ["OBS_DATA"]
-REGION_MASK_FILENAME="MERRA2_landmask.mat"
-
-### Colormap directory & filename for plotting
-COLORMAP_DIR=os.environ["OBS_DATA"]
-COLORMAP_FILENAME="ShiftRatio_cmaps.mat"
-
-### Set shift for non-Gaussian tail calculations
-shift=0.5
-
-### Save figure to filename/directory
-FIG_OUTPUT_DIR=os.environ["WK_DIR"]+"/model/PS"
-FIG_OUTPUT_FILENAME="ShiftRatio_"+monthstr+"_"+str(ptile)+".ps"
-
-### Reanalysis output figure for comparisons
-FIG_OBS_DIR=os.environ["WK_DIR"]+"/obs/PS"
-FIG_OBS_FILENAME="MERRA2_198001-200912_res=0.5-0.66.ShiftRatio_"+monthstr+"_"+str(ptile)+".png"
+# Model name and output directory
+MODEL = os.environ["CASENAME"]
+MODEL_OUTPUT_DIR = os.environ["DATADIR"] + "/day"
+
+# Variable Names
+T2M_VAR = os.environ["tas_var"]
+TIME_VAR = os.environ["time_coord"]
+LAT_VAR = os.environ["lat_coord"]
+LON_VAR = os.environ["lon_coord"]
+
+# Set range of years, season, and tail percentile threshold for calculations
+yearbeg = int(os.environ["startdate"])
+yearend = int(os.environ["enddate"])
+monthstr = os.environ["monthstr"]
+monthsub = os.environ["monthsub"]
+ptile = int(os.environ["ptile"])
+
+# Region mask directory & filename
+REGION_MASK_DIR = os.environ["OBS_DATA"]
+REGION_MASK_FILENAME = "MERRA2_landmask.mat"
+
+# Colormap directory & filename for plotting
+COLORMAP_DIR = os.environ["OBS_DATA"]
+COLORMAP_FILENAME = "ShiftRatio_cmaps.mat"
+
+# Set shift for non-Gaussian tail calculations
+shift = 0.5
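+# (A reading of the Loikith and Neelin references cited in the module header:
+# the shift is expressed in units of the local anomaly standard deviation, so
+# 0.5 corresponds to shifting the distribution by half a standard deviation.)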
"MERRA2_198001-200912_res=0.5-0.66.ShiftRatio_" + monthstr + "_" + str(ptile) + ".png" # ====================================================================== # END USER SPECIFIED SECTION @@ -58,56 +58,55 @@ # ====================================================================== # DO NOT MODIFY CODE BELOW # ====================================================================== -data={} +data = {} -data["MODEL"]=MODEL -data["MODEL_OUTPUT_DIR"]=MODEL_OUTPUT_DIR -data["REGION_MASK_DIR"]=REGION_MASK_DIR -data["REGION_MASK_FILENAME"]=REGION_MASK_FILENAME -data["FIG_OUTPUT_DIR"]=FIG_OUTPUT_DIR -data["FIG_OUTPUT_FILENAME"]=FIG_OUTPUT_FILENAME -data["FIG_OBS_DIR"]=FIG_OBS_DIR -data["FIG_OBS_FILENAME"]=FIG_OBS_FILENAME +data["MODEL"] = MODEL +data["MODEL_OUTPUT_DIR"] = MODEL_OUTPUT_DIR +data["REGION_MASK_DIR"] = REGION_MASK_DIR +data["REGION_MASK_FILENAME"] = REGION_MASK_FILENAME +data["FIG_OUTPUT_DIR"] = FIG_OUTPUT_DIR +data["FIG_OUTPUT_FILENAME"] = FIG_OUTPUT_FILENAME +data["FIG_OBS_DIR"] = FIG_OBS_DIR +data["FIG_OBS_FILENAME"] = FIG_OBS_FILENAME -data["COLORMAP_DIR"]=COLORMAP_DIR -data["COLORMAP_FILENAME"]=COLORMAP_FILENAME +data["COLORMAP_DIR"] = COLORMAP_DIR +data["COLORMAP_FILENAME"] = COLORMAP_FILENAME -data["yearbeg"]=yearbeg -data["yearend"]=yearend -data["monthsub"]=monthsub -data["monthstr"]=monthstr -data["ptile"]=ptile +data["yearbeg"] = yearbeg +data["yearend"] = yearend +data["monthsub"] = monthsub +data["monthstr"] = monthstr +data["ptile"] = ptile -data["TIME_VAR"]=TIME_VAR -data["LAT_VAR"]=LAT_VAR -data["LON_VAR"]=LON_VAR -data["T2M_VAR"]=T2M_VAR +data["TIME_VAR"] = TIME_VAR +data["LAT_VAR"] = LAT_VAR +data["LON_VAR"] = LON_VAR +data["T2M_VAR"] = T2M_VAR -data["shift"]=shift +data["shift"] = shift # Taking care of function arguments for calculating shift ratio -data["args1"]=[ \ -shift, \ -REGION_MASK_DIR, \ -REGION_MASK_FILENAME, \ -COLORMAP_DIR, \ -COLORMAP_FILENAME, \ -MODEL_OUTPUT_DIR, \ -MODEL, \ -yearbeg, \ -yearend, \ -monthsub, \ -monthstr, \ -ptile, \ -FIG_OUTPUT_FILENAME, \ -FIG_OUTPUT_DIR, \ -FIG_OBS_DIR, \ -FIG_OBS_FILENAME, \ -TIME_VAR, \ -T2M_VAR, \ -LAT_VAR, \ -LON_VAR ] - -with open(os.environ["WK_DIR"]+"/TempExtDistShape_ShiftRatio_parameters.json", "w") as outfile: +data["args1"] = [ + shift, + REGION_MASK_DIR, + REGION_MASK_FILENAME, + COLORMAP_DIR, + COLORMAP_FILENAME, + MODEL_OUTPUT_DIR, + MODEL, + yearbeg, + yearend, + monthsub, + monthstr, + ptile, + FIG_OUTPUT_FILENAME, + FIG_OUTPUT_DIR, + FIG_OBS_DIR, + FIG_OBS_FILENAME, + TIME_VAR, + T2M_VAR, + LAT_VAR, + LON_VAR] + +with open(os.environ["WORK_DIR"] + "/TempExtDistShape_ShiftRatio_parameters.json", "w") as outfile: json.dump(data, outfile) - diff --git a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_util.py b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_util.py index e8e6edbdf..7007690d5 100644 --- a/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_util.py +++ b/diagnostics/temp_extremes_distshape/TempExtDistShape_ShiftRatio_util.py @@ -20,6 +20,7 @@ from netCDF4 import Dataset import cftime import matplotlib + matplotlib.use('Agg') import matplotlib.pyplot as mplt import matplotlib.colors as mcolors @@ -29,201 +30,216 @@ from scipy.interpolate import NearestNDInterpolator import cartopy + # ====================================================================== -### Region_Mask -### Generate a map of values corresponding to land regions in the file MERRA2_landmask.mat -# ----- region_mask_filename and model_netcdf_filename are string names referring to 
directory locations of mask and model data +# Region_Mask +# Generate a map of values corresponding to land regions in the file MERRA2_landmask.mat +# ----- region_mask_filename and model_netcdf_filename are string names referring to directory +# locations of mask and model data # ----- lon_var, lat_var are longitude and latitude variable names for reading in model data # --------- Output is mask of region -def Region_Mask(region_mask_filename,model_netcdf_filename,lon_var,lat_var): - ### Load & pre-process region mask + +def Region_Mask(region_mask_filename, model_netcdf_filename, lon_var, lat_var): + # Load & pre-process region mask print(" Generating region mask...") - matfile=scipy.io.loadmat(region_mask_filename) - lat_m=matfile["lats"] - lon_m=matfile["lons"] - msk=matfile["mask"] - lon_m=numpy.append(lon_m,numpy.reshape(lon_m[0,:],(-1,1))+360,0) - lon_m=numpy.append(numpy.reshape(lon_m[-2,:],(-1,1))-360,lon_m,0) - msk=numpy.append(msk,numpy.reshape(msk[0,:],(-1,lat_m.size)),0) - msk=numpy.append(numpy.reshape(msk[-2,:],(-1,lat_m.size)),msk,0) - LAT,LON=numpy.meshgrid(lat_m,lon_m,sparse=False,indexing="xy") - LAT=numpy.reshape(LAT,(-1,1)) - LON=numpy.reshape(LON,(-1,1)) - mask_region=numpy.reshape(msk,(-1,1)) - LATLON=numpy.squeeze(numpy.array((LAT,LON))) - LATLON=LATLON.transpose() - regMaskInterpolator=NearestNDInterpolator(LATLON,mask_region) - - ### Interpolate Region Mask onto Model Grid using Nearest Grid Value - t2m_netcdf=Dataset(model_netcdf_filename,"r") - lon=numpy.asarray(t2m_netcdf.variables[lon_var][:],dtype="float") - lat=numpy.asarray(t2m_netcdf.variables[lat_var][:],dtype="float") + matfile = scipy.io.loadmat(region_mask_filename) + lat_m = matfile["lats"] + lon_m = matfile["lons"] + msk = matfile["mask"] + lon_m = numpy.append(lon_m, numpy.reshape(lon_m[0, :], (-1, 1)) + 360, 0) + lon_m = numpy.append(numpy.reshape(lon_m[-2, :], (-1, 1)) - 360, lon_m, 0) + msk = numpy.append(msk, numpy.reshape(msk[0, :], (-1, lat_m.size)), 0) + msk = numpy.append(numpy.reshape(msk[-2, :], (-1, lat_m.size)), msk, 0) + LAT, LON = numpy.meshgrid(lat_m, lon_m, sparse=False, indexing="xy") + LAT = numpy.reshape(LAT, (-1, 1)) + LON = numpy.reshape(LON, (-1, 1)) + mask_region = numpy.reshape(msk, (-1, 1)) + LATLON = numpy.squeeze(numpy.array((LAT, LON))) + LATLON = LATLON.transpose() + regMaskInterpolator = NearestNDInterpolator(LATLON, mask_region) + + # Interpolate Region Mask onto Model Grid using Nearest Grid Value + t2m_netcdf = Dataset(model_netcdf_filename, "r") + lon = numpy.asarray(t2m_netcdf.variables[lon_var][:], dtype="float") + lat = numpy.asarray(t2m_netcdf.variables[lat_var][:], dtype="float") t2m_netcdf.close() - ### Fix longitudes so values range from -180 to 180 - if lon[lon>180].size>0: - lon[lon>180]=lon[lon>180]-360 - LAT,LON=numpy.meshgrid(lat,lon,sparse=False,indexing="xy") - LAT=numpy.reshape(LAT,(-1,1)) - LON=numpy.reshape(LON,(-1,1)) - LATLON=numpy.squeeze(numpy.array((LAT,LON))) - LATLON=LATLON.transpose() - mask_region=numpy.zeros(LAT.size) + # Fix longitudes so values range from -180 to 180 + if lon[lon > 180].size > 0: + lon[lon > 180] = lon[lon > 180] - 360 + LAT, LON = numpy.meshgrid(lat, lon, sparse=False, indexing="xy") + LAT = numpy.reshape(LAT, (-1, 1)) + LON = numpy.reshape(LON, (-1, 1)) + LATLON = numpy.squeeze(numpy.array((LAT, LON))) + LATLON = LATLON.transpose() + mask_region = numpy.zeros(LAT.size) for latlon_idx in numpy.arange(mask_region.shape[0]): - mask_region[latlon_idx]=regMaskInterpolator(LATLON[latlon_idx,:]) - 
mask_region=numpy.reshape(mask_region,(-1,lat.size)) - mask_region[mask_region!=1]=numpy.nan + mask_region[latlon_idx] = regMaskInterpolator(LATLON[latlon_idx, :]) + mask_region = numpy.reshape(mask_region, (-1, lat.size)) + mask_region[mask_region != 1] = numpy.nan print("...Generated!") return mask_region + # ====================================================================== -### Seasonal_Anomalies -### Read in two-meter temperature variable from netcdf file and compute seasonal anomalies +# Seasonal_Anomalies +# Read in two-meter temperature variable from netcdf file and compute seasonal anomalies # ----- model_netcdf_filename is string name of directory location of netcdf file to be opened # ----- lon_var,lat_var,field_var,time_var are string names of longitude, latitude, variable, and time in netcdf file # ----- monthsub is array of months (integers) for seasonal analysis # ----- yearbeg and yearend of range of years for analysis # --------- Output is two-meter temperature seasonal anomalies, longitude, and latitude arrays -def Seasonal_Anomalies(model_netcdf_filename,lon_var,lat_var,field_var,time_var,monthsub,yearbeg,yearend): - var_netcdf=Dataset(model_netcdf_filename,"r") - lat=numpy.asarray(var_netcdf.variables[lat_var][:],dtype="float") - lon=numpy.asarray(var_netcdf.variables[lon_var][:],dtype="float") - var_data=numpy.asarray(var_netcdf.variables[field_var][:],dtype="float") #time, lat, lon - datatime=numpy.asarray(var_netcdf.variables[time_var][:],dtype="float") - timeunits=var_netcdf.variables[time_var].units - varunits=var_netcdf.variables[field_var].units - caltype=var_netcdf.variables[time_var].calendar +def Seasonal_Anomalies(model_netcdf_filename, lon_var, lat_var, field_var, time_var, monthsub, yearbeg, yearend): + var_netcdf = Dataset(model_netcdf_filename, "r") + lat = numpy.asarray(var_netcdf.variables[lat_var][:], dtype="float") + lon = numpy.asarray(var_netcdf.variables[lon_var][:], dtype="float") + var_data = numpy.asarray(var_netcdf.variables[field_var][:], dtype="float") # time, lat, lon + datatime = numpy.asarray(var_netcdf.variables[time_var][:], dtype="float") + timeunits = var_netcdf.variables[time_var].units + caltype = var_netcdf.variables[time_var].calendar var_netcdf.close() - ### Fix longitudes so values range from -180 to 180 - if lon[lon>180].size>0: - lon[lon>180]=lon[lon>180]-360 + # Fix longitudes so values range from -180 to 180 + if lon[lon > 180].size > 0: + lon[lon > 180] = lon[lon > 180] - 360 - ### Reshape data to [lon, lat, time] dimensions for code to run properly + # Reshape data to [lon, lat, time] dimensions for code to run properly if len(var_data.shape) == 4: - var_data=numpy.squeeze(var_data) - if var_data.shape == (len(lon),len(lat),len(datatime)): - var_data=var_data - elif var_data.shape == (len(lat),len(datatime),len(lon)): - var_data=numpy.transpose(var_data,(2,0,1)) - elif var_data.shape == (len(lon),len(datatime),len(lat)): - var_data=numpy.transpose(var_data,(0,2,1)) - elif var_data.shape == (len(datatime),len(lon),len(lat)): - var_data=numpy.transpose(var_data,(1,2,0)) - elif var_data.shape == (len(lat),len(lon),len(datatime)): - var_data=numpy.transpose(var_data,(1,0,2)) - elif var_data.shape == (len(datatime),len(lat),len(lon)): - var_data=numpy.transpose(var_data,(2,1,0)) - - ### Subset temperature to time range specified by "yearbeg,yearend" values - datecf=[cftime.num2date(t,units=timeunits,calendar=caltype) for t in datatime] - date= numpy.array([T.strftime('%Y-%m-%d') for T in list(datecf)]) #this converts the 
arrays of timesteps output above to a more readable string format - mo=numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)]) - yr=numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)]) + var_data = numpy.squeeze(var_data) + if var_data.shape == (len(lon), len(lat), len(datatime)): + var_data = var_data + elif var_data.shape == (len(lat), len(datatime), len(lon)): + var_data = numpy.transpose(var_data, (2, 0, 1)) + elif var_data.shape == (len(lon), len(datatime), len(lat)): + var_data = numpy.transpose(var_data, (0, 2, 1)) + elif var_data.shape == (len(datatime), len(lon), len(lat)): + var_data = numpy.transpose(var_data, (1, 2, 0)) + elif var_data.shape == (len(lat), len(lon), len(datatime)): + var_data = numpy.transpose(var_data, (1, 0, 2)) + elif var_data.shape == (len(datatime), len(lat), len(lon)): + var_data = numpy.transpose(var_data, (2, 1, 0)) + + # Subset temperature to time range specified by "yearbeg,yearend" values + datecf = [cftime.num2date(t, units=timeunits, calendar=caltype) for t in datatime] + mo = numpy.array([int('{0.month:02d}'.format(t)) for t in list(datecf)]) + yr = numpy.array([int('{0.year:04d}'.format(t)) for t in list(datecf)]) leapstr = numpy.array(['{0.month:2d}-{0.day:2d}'.format(t) for t in list(datecf)]) - yearind = numpy.where(numpy.logical_and(yr>=yearbeg, yr<=yearend))[0] - var_data=var_data[:,:,yearind] - datecf=numpy.array(datecf) - datecf=datecf[yearind] - leapstr=leapstr[yearind] - mo=mo[yearind] - yr=yr[yearind] - - ### Subset temperature to season specified by "monthsub" vector - moinds=numpy.in1d(mo,monthsub) - moinds=(numpy.where(moinds)[0]) - moinds=[int(indval) for indval in moinds] - leapstr=leapstr[moinds] - var_data=var_data[:,:,moinds] - datecf=datecf[moinds] - - ### Remove leap days - dateind=(leapstr != '02-29') - leapstr=leapstr[dateind] - var_data=var_data[:,:,dateind] - datecf=datecf[dateind] - - ### Compute temperature anomaly - days_uniq=numpy.unique(leapstr) - var_anom=numpy.empty(var_data.shape) - dayinds=[numpy.where(leapstr==dd)[0] for dd in days_uniq] - for begval in numpy.arange(0,len(dayinds)): - temp_clim=numpy.mean(var_data[:,:,dayinds[begval]],axis=2) - temp_clim=temp_clim.reshape(temp_clim.shape[0],temp_clim.shape[1], 1) - var_anom[:,:,dayinds[begval]]=var_data[:,:,dayinds[begval]]-temp_clim - return var_anom,lon,lat + yearind = numpy.where(numpy.logical_and(yr >= yearbeg, yr <= yearend))[0] + var_data = var_data[:, :, yearind] + datecf = numpy.array(datecf) + datecf = datecf[yearind] + leapstr = leapstr[yearind] + mo = mo[yearind] + + # Subset temperature to season specified by "monthsub" vector + moinds = numpy.in1d(mo, monthsub) + moinds = (numpy.where(moinds)[0]) + moinds = [int(indval) for indval in moinds] + leapstr = leapstr[moinds] + var_data = var_data[:, :, moinds] + datecf = datecf[moinds] + + # Remove leap days + dateind = (leapstr != '02-29') + leapstr = leapstr[dateind] + var_data = var_data[:, :, dateind] + + # Compute temperature anomaly + days_uniq = numpy.unique(leapstr) + var_anom = numpy.empty(var_data.shape) + dayinds = [numpy.where(leapstr == dd)[0] for dd in days_uniq] + for begval in numpy.arange(0, len(dayinds)): + temp_clim = numpy.mean(var_data[:, :, dayinds[begval]], axis=2) + temp_clim = temp_clim.reshape(temp_clim.shape[0], temp_clim.shape[1], 1) + var_anom[:, :, dayinds[begval]] = var_data[:, :, dayinds[begval]] - temp_clim + return var_anom, lon, lat + # ====================================================================== -### ShiftRatio_Calc -### Compute shift 
ratio of Non-Gaussian to Gaussian distribution tails specified using "ptile" percentile
+# ShiftRatio_Calc
+# Compute shift ratio of Non-Gaussian to Gaussian distribution tails specified using "ptile" percentile
 # ----- ptile is percentile to define tail of distribution of interest
 # ----- shift is the value used to shift the distribution as a warming scenario
 # ----- msk is output from Region_Mask function, masking to land grid cells
 # ----- T2Manom_data is two-meter temperature anomaly data output from Seasonal_Anomalies function above
 # ----- lon and lat are longitude and latitude arrays output from Seasonal_Anomalies function above
 # --------- Output is global shift ratio array
-def ShiftRatio_Calc(ptile,shift,msk,T2Manom_data,lon,lat):
+def ShiftRatio_Calc(ptile, shift, msk, T2Manom_data, lon, lat):
     print(" Computing underlying-to-Gaussian distribution shift ratio...")

-    ### Detrend temperature anomaly data output from Seasonal_Anomalies function
-    T2Manom_data=signal.detrend(T2Manom_data,axis=2,type='linear')
+    # Detrend temperature anomaly data output from Seasonal_Anomalies function
+    T2Manom_data = signal.detrend(T2Manom_data, axis=2, type='linear')

-    ### Mask temperature array by cloning 2D lon-lat mask array output from Mask_Region function to size of temperature array in time dimension
-    masked_T2M_anom = T2Manom_data*(numpy.repeat(msk[:,:,numpy.newaxis], T2Manom_data.shape[2], axis=2))
+    # Mask temperature array by cloning 2D lon-lat mask array output from Mask_Region function to size of
+    # temperature array in time dimension
+    masked_T2M_anom = T2Manom_data * (numpy.repeat(msk[:, :, numpy.newaxis], T2Manom_data.shape[2], axis=2))

-    ### Extract the "ptile" percentile of the temperature anomaly distribution
-    pthresh=numpy.percentile(masked_T2M_anom,ptile,axis=2,interpolation='midpoint') #size lon-lat; midpoint to match matlab percentile function interpolation scheme
+    # Extract the "ptile" percentile of the temperature anomaly distribution
+    # size lon-lat; midpoint to match matlab percentile function interpolation scheme
+    pthresh = numpy.percentile(masked_T2M_anom, ptile, axis=2,
+                               interpolation='midpoint')

-    ### Compute number of days exceeding pthresh after shift
-    # ----- Loop through each grid cell where 'thrshold[iloncell,ilatcell]' is the percentile threshold 'pthresh' of the two-meter temperature anomaly distribution at grid cell defined by its longitude-latitude coordinate
+    # Compute number of days exceeding pthresh after shift
+    # ----- Loop through each grid cell where 'threshold[iloncell,ilatcell]' is the percentile
+    # threshold 'pthresh' of the two-meter temperature anomaly distribution at grid cell
+    # defined by its longitude-latitude coordinate
     if ptile < 50:
-        exceedances=numpy.array([[len(numpy.where((masked_T2M_anom[iloncell,ilatcell,:]+shift*numpy.std(masked_T2M_anom[iloncell,ilatcell,:],ddof=1))<pthresh[iloncell,ilatcell])[0]) if ~numpy.isnan(pthresh[iloncell,ilatcell]) else numpy.nan for ilatcell in numpy.arange(0,len(lat))] for iloncell in numpy.arange(0,len(lon))])
+        exceedances = numpy.array([[len(numpy.where((masked_T2M_anom[iloncell, ilatcell, :] + shift * numpy.std(
+            masked_T2M_anom[iloncell, ilatcell, :], ddof=1)) < pthresh[iloncell, ilatcell])[0]) if ~numpy.isnan(
+            pthresh[iloncell, ilatcell]) else numpy.nan for ilatcell in numpy.arange(0, len(lat))] for iloncell in
+            numpy.arange(0, len(lon))])
     elif ptile > 50:
-        exceedances=numpy.array([[len(numpy.where((masked_T2M_anom[iloncell,ilatcell,:]+shift*numpy.std(masked_T2M_anom[iloncell,ilatcell,:],ddof=1))>pthresh[iloncell,ilatcell])[0]) if ~numpy.isnan(pthresh[iloncell,ilatcell]) else numpy.nan for ilatcell in numpy.arange(0,len(lat))] for iloncell in numpy.arange(0,len(lon))])
-
-    ### Convert exceedances into percentages by dividing by total number of days and multiplying by 100
-    exceedances=numpy.divide(exceedances,masked_T2M_anom.shape[2])*100
-
-    ### Set zeros to NaNs
-    exceedances=exceedances.astype(float)
-    exceedances[exceedances==0]=numpy.nan
-
-    ### Draw random samples from Gaussian distribution the length of the time dimension, and repeat 10000 times
-    # ----- Compute 5th & 95th percentiles of random gaussian distribution shift to determine statistical significance of shift ratio
-    gauss_exceedances=[]
-    for reps in numpy.arange(0,10000):
-        randsamp=numpy.random.randn(masked_T2M_anom.shape[2])
-        randsamp_shift=randsamp+(numpy.std(randsamp,ddof=1)*shift)
-        gauss_pthresh=numpy.percentile(randsamp,ptile,interpolation='midpoint')
+        exceedances = numpy.array([[len(numpy.where((masked_T2M_anom[iloncell, ilatcell, :] + shift * numpy.std(
+            masked_T2M_anom[iloncell, ilatcell, :], ddof=1)) > pthresh[iloncell, ilatcell])[0]) if ~numpy.isnan(
+            pthresh[iloncell, ilatcell]) else numpy.nan for ilatcell in numpy.arange(0, len(lat))] for iloncell in
+            numpy.arange(0, len(lon))])
+
+    # Convert exceedances into percentages by dividing by total number of days and multiplying by 100
+    exceedances = numpy.divide(exceedances, masked_T2M_anom.shape[2]) * 100
+
+    # Set zeros to NaNs
+    exceedances = exceedances.astype(float)
+    exceedances[exceedances == 0] = numpy.nan
+
+    # Draw random samples from Gaussian distribution the length of the time dimension, and repeat 10000 times
+    # ----- Compute 5th & 95th percentiles of random gaussian distribution shift to determine statistical
+    # significance of shift ratio
+    gauss_exceedances = []
+    for reps in numpy.arange(0, 10000):
+        randsamp = numpy.random.randn(masked_T2M_anom.shape[2])
+        randsamp_shift = randsamp + (numpy.std(randsamp, ddof=1) * shift)
+        gauss_pthresh = numpy.percentile(randsamp, ptile, interpolation='midpoint')
         if ptile < 50:
-            excd_inds=randsamp_shift[randsamp_shift<gauss_pthresh]
+            excd_inds = randsamp_shift[randsamp_shift < gauss_pthresh]
         elif ptile > 50:
-            excd_inds=randsamp_shift[randsamp_shift>gauss_pthresh]
-        gauss_exceedances.append(numpy.true_divide(len(excd_inds),len(randsamp)))
-    gaussp5=numpy.percentile(gauss_exceedances,5,interpolation='midpoint')*100
-    gaussp95=numpy.percentile(gauss_exceedances,95,interpolation='midpoint')*100
-    gaussp50=numpy.percentile(gauss_exceedances,50,interpolation='midpoint')*100
+            excd_inds = randsamp_shift[randsamp_shift > gauss_pthresh]
+        gauss_exceedances.append(numpy.true_divide(len(excd_inds), len(randsamp)))
+    gaussp5 = numpy.percentile(gauss_exceedances, 5, interpolation='midpoint') * 100
+    gaussp95 = numpy.percentile(gauss_exceedances, 95, interpolation='midpoint') * 100
+    gaussp50 = numpy.percentile(gauss_exceedances, 50, interpolation='midpoint') * 100

-    ### Find where exceedance percentiles are outside the 5th and 95th percentile of the random gaussian distribution
+    # Find where exceedance percentiles are outside the 5th and 95th percentile of the random gaussian distribution
     # ----- Where values are not outside the 5th/95th percentiles, set to NaN
     # ----- Remaining grid cells are statistically significantly different from a Gaussian shift
     print("### DEBUG ShiftRatio_util")
-    print("### gaussp5: {}, gaussp95: {}".format(gaussp5,gaussp95))
+    print("### gaussp5: {}, gaussp95: {}".format(gaussp5, gaussp95))
     print("### exceedances: {} {}; {} bytes".format(exceedances.dtype, exceedances.shape, exceedances.nbytes))
-    exceedances[(exceedances>gaussp5)&(exceedances<gaussp95)]=numpy.nan
+    exceedances[(exceedances > gaussp5) & (exceedances < gaussp95)] = numpy.nan
     print("### exceedances: {} {}; {} bytes".format(exceedances.dtype, exceedances.shape, exceedances.nbytes))

-    ### Compute ratio of exceedances from non-Gaussian shift to median (50th percentile) of shifts from randomly generated Gaussian distributions
-    shiftratio=numpy.true_divide(exceedances,numpy.ones_like(exceedances)*gaussp50).transpose(1,0)
+    # Compute ratio of exceedances from
non-Gaussian shift to median (50th percentile) of shifts + # from randomly generated Gaussian distributions + shiftratio = numpy.true_divide(exceedances, numpy.ones_like(exceedances) * gaussp50).transpose(1, 0) print("...Computed!") return shiftratio + # ====================================================================== -### ShiftRatio_Plot -### Plot shift ratio of underlying-to-Gaussian distribution tails +# ShiftRatio_Plot +# Plot shift ratio of underlying-to-Gaussian distribution tails # ----- model_netcdf_filename is data filename and lon_var is longitude string to read in longitude array # ----- colormap_file is Matlab file specifying colormap for plotting # ----- lat is latitude array output from Seasonal_Anomalies function above @@ -231,79 +247,86 @@ def ShiftRatio_Calc(ptile,shift,msk,T2Manom_data,lon,lat): # ----- monthstr is string referring to months of seasonal analysis # ----- ptile is percentile to define tail of distribution of interest # ----- fig_dir and fig_name are location to save figure output -def ShiftRatio_Plot(model_netcdf_filename,lon_var,colormap_file,lat,shiftratio,monthstr,ptile,fig_dir,fig_name): +def ShiftRatio_Plot(model_netcdf_filename, lon_var, colormap_file, lat, shiftratio, monthstr, ptile, fig_dir, fig_name): print(" Plotting shift ratio...") if ptile < 50: - cmap_name='ShiftRatio_cold' + cmap_name = 'ShiftRatio_cold' elif ptile > 50: - cmap_name='ShiftRatio_warm' - colormaps=scipy.io.loadmat(colormap_file) - mycmap=mcolors.LinearSegmentedColormap.from_list('my_colormap', colormaps[cmap_name]) - fig=mplt.figure(figsize=(10,10)) - ax=mplt.axes(projection=cartopy.crs.PlateCarree()) - ax.set_extent([-180,180,-60,90], crs=ax.projection) - - ### Read in longitude directly from model and use shiftdata function to avoid wrapping while plotting (and align latitudes with land borders) - var_netcdf=Dataset(model_netcdf_filename,"r") - lon=numpy.asarray(var_netcdf.variables[lon_var][:],dtype="float") - if lon[lon>180].size>0: #0 to 360 grid - shiftratio,lon = shiftgrid(180.,shiftratio,lon,start=False) - lat=lat - numpy.true_divide((lat[2]-lat[1]),2) - lon=lon - numpy.true_divide((lon[2]-lon[1]),2) + cmap_name = 'ShiftRatio_warm' + colormaps = scipy.io.loadmat(colormap_file) + mycmap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colormaps[cmap_name]) + fig = mplt.figure(figsize=(10, 10)) + ax = mplt.axes(projection=cartopy.crs.PlateCarree()) + ax.set_extent([-180, 180, -60, 90], crs=ax.projection) + + ### Read in longitude directly from model and use shiftdata function to avoid wrapping while plotting + # (and align latitudes with land borders) + var_netcdf = Dataset(model_netcdf_filename, "r") + lon = numpy.asarray(var_netcdf.variables[lon_var][:], dtype="float") + if lon[lon > 180].size > 0: # 0 to 360 grid + shiftratio, lon = shiftgrid(180., shiftratio, lon, start=False) + lat = lat - numpy.true_divide((lat[2] - lat[1]), 2) + lon = lon - numpy.true_divide((lon[2] - lon[1]), 2) if ptile < 50: - p1=mplt.pcolormesh(lon,lat,numpy.log10(shiftratio),cmap=mycmap,vmin=numpy.log10(0.125),vmax=numpy.log10(2),transform=ax.projection) + p1 = mplt.pcolormesh(lon, lat, numpy.log10(shiftratio), cmap=mycmap, vmin=numpy.log10(0.125), + vmax=numpy.log10(2), transform=ax.projection) elif ptile > 50: - p1=mplt.pcolormesh(lon,lat,numpy.log10(shiftratio),cmap=mycmap,vmin=numpy.log10(0.5),vmax=numpy.log10(2),transform=ax.projection) + p1 = mplt.pcolormesh(lon, lat, numpy.log10(shiftratio), cmap=mycmap, vmin=numpy.log10(0.5), + vmax=numpy.log10(2), 
transform=ax.projection) - ### Add coastlines and lake boundaries - ax.add_feature(cartopy.feature.COASTLINE,zorder=1,linewidth=0.7) - ax.add_feature(cartopy.feature.LAKES,zorder=1,linewidth=0.7,edgecolor='k',facecolor='none') + # Add coastlines and lake boundaries + ax.add_feature(cartopy.feature.COASTLINE, zorder=1, linewidth=0.7) + ax.add_feature(cartopy.feature.LAKES, zorder=1, linewidth=0.7, edgecolor='k', facecolor='none') - ### Create individual colorbars per subplot + # Create individual colorbars per subplot ax.set_aspect('equal') - cax2 = inset_axes(ax,width="100%", height="15%",loc='lower center',bbox_to_anchor=(0,-0.05,1,0.3),bbox_transform=ax.transAxes,borderpad=0) + cax2 = inset_axes(ax, width="100%", height="15%", loc='lower center', bbox_to_anchor=(0, -0.05, 1, 0.3), + bbox_transform=ax.transAxes, borderpad=0) if ptile < 50: - cbar=mplt.colorbar(p1,cax=cax2,orientation='horizontal',ticks=[numpy.log10(0.125),numpy.log10(0.25),numpy.log10(0.5),numpy.log10(1),numpy.log10(2)]) - cbar.ax.set_xticklabels(['1/8','1/4','1/2','1','2']) + cbar = mplt.colorbar(p1, cax=cax2, orientation='horizontal', + ticks=[numpy.log10(0.125), numpy.log10(0.25), numpy.log10(0.5), numpy.log10(1), + numpy.log10(2)]) + cbar.ax.set_xticklabels(['1/8', '1/4', '1/2', '1', '2']) elif ptile > 50: - cbar=mplt.colorbar(p1,cax=cax2,orientation='horizontal',ticks=[numpy.log10(0.5),numpy.log10(1),numpy.log10(2)]) - cbar.ax.set_xticklabels(['1/2','1','2']) + cbar = mplt.colorbar(p1, cax=cax2, orientation='horizontal', + ticks=[numpy.log10(0.5), numpy.log10(1), numpy.log10(2)]) + cbar.ax.set_xticklabels(['1/2', '1', '2']) cbar.ax.tick_params(labelsize=20) - ax.text(0.02, 0.02, monthstr,fontsize=14,transform=ax.transAxes,weight='bold') + ax.text(0.02, 0.02, monthstr, fontsize=14, transform=ax.transAxes, weight='bold') fig.canvas.draw() - fig.savefig(fig_dir+'/'+fig_name, bbox_inches="tight") + fig.savefig(fig_dir + '/' + fig_name, bbox_inches="tight") print("...Completed!") - print(" Figure saved as "+fig_dir+'/'+fig_name+"!") + print(" Figure saved as " + fig_dir + '/' + fig_name + "!") + # ====================================================================== -### shiftgrid -### Shift global lat/lon grid east or west. Taken from Python 2 Basemap function -def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0): - - #.. tabularcolumns:: |l|L| - #============== ==================================================== - #Arguments Description - #============== ==================================================== - #lon0 starting longitude for shifted grid +# shiftgrid +# Shift global lat/lon grid east or west. Taken from Python 2 Basemap function +def shiftgrid(lon0, datain, lonsin, start=True, cyclic=360.0): + # .. tabularcolumns:: |l|L| + # ============== ==================================================== + # Arguments Description + # ============== ==================================================== + # lon0 starting longitude for shifted grid # (ending longitude if start=False). lon0 must be on # input grid (within the range of lonsin). - #datain original data with longitude the right-most + # datain original data with longitude the right-most # dimension. - #lonsin original longitudes. - #============== ==================================================== - #.. 
tabularcolumns:: |l|L| - #============== ==================================================== - #Keywords Description - #============== ==================================================== - #start if True, lon0 represents the starting longitude + # lonsin original longitudes. + # ============== ==================================================== + # .. tabularcolumns:: |l|L| + # ============== ==================================================== + # Keywords Description + # ============== ==================================================== + # start if True, lon0 represents the starting longitude # of the new grid. if False, lon0 is the ending # longitude. Default True. - #cyclic width of periodic domain (default 360) - #============== ==================================================== - #returns ``dataout,lonsout`` (data and longitudes on shifted grid). + # cyclic width of periodic domain (default 360) + # ============== ==================================================== + # returns ``dataout,lonsout`` (data and longitudes on shifted grid). - if numpy.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4: + if numpy.fabs(lonsin[-1] - lonsin[0] - cyclic) > 1.e-4: # Use all data instead of raise ValueError, 'cyclic point not included' start_idx = 0 else: @@ -311,26 +334,26 @@ def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0): start_idx = 1 if lon0 < lonsin[0] or lon0 > lonsin[-1]: raise ValueError('lon0 outside of range of lonsin') - i0 = numpy.argmin(numpy.fabs(lonsin-lon0)) - i0_shift = len(lonsin)-i0 + i0 = numpy.argmin(numpy.fabs(lonsin - lon0)) + i0_shift = len(lonsin) - i0 if numpy.ma.isMA(datain): - dataout = numpy.ma.zeros(datain.shape,datain.dtype) + dataout = numpy.ma.zeros(datain.shape, datain.dtype) else: - dataout = numpy.zeros(datain.shape,datain.dtype) + dataout = numpy.zeros(datain.shape, datain.dtype) if numpy.ma.isMA(lonsin): - lonsout = numpy.ma.zeros(lonsin.shape,lonsin.dtype) + lonsout = numpy.ma.zeros(lonsin.shape, lonsin.dtype) else: - lonsout = numpy.zeros(lonsin.shape,lonsin.dtype) + lonsout = numpy.zeros(lonsin.shape, lonsin.dtype) if start: lonsout[0:i0_shift] = lonsin[i0:] else: - lonsout[0:i0_shift] = lonsin[i0:]-cyclic - dataout[...,0:i0_shift] = datain[...,i0:] + lonsout[0:i0_shift] = lonsin[i0:] - cyclic + dataout[..., 0:i0_shift] = datain[..., i0:] if start: - lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic + lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx] + cyclic else: - lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx] - dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx] - return dataout,lonsout + lonsout[i0_shift:] = lonsin[start_idx:i0 + start_idx] + dataout[..., i0_shift:] = datain[..., start_idx:i0 + start_idx] + return dataout, lonsout # ====================================================================== diff --git a/diagnostics/temp_extremes_distshape/settings.jsonc b/diagnostics/temp_extremes_distshape/settings.jsonc index bf5bc4122..7d555b27d 100644 --- a/diagnostics/temp_extremes_distshape/settings.jsonc +++ b/diagnostics/temp_extremes_distshape/settings.jsonc @@ -13,8 +13,8 @@ // Human-readable name of the diagnostic. "long_name" : "Surface Temperature Extremes and Distribution Shape", - // Modeling realm. - "realm" : "atmos", + // Data convention expected by POD: cmip (default), cesm, or gfdl + "convention" : "cmip", // Human-readable name of the diagnostic. 
"description" : "Surface Temperature Extremes and Distribution Shape", @@ -50,8 +50,16 @@ "frequency": "day" }, "dimensions": { - "lat": {"standard_name": "latitude"}, - "lon": {"standard_name": "longitude"}, + "lat": { + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" + }, + "lon": { + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" + }, "time": {"standard_name": "time"}, "plev": { // only used to specify level of zg "standard_name": "air_pressure", @@ -64,18 +72,21 @@ "varlist": { "tas": { "standard_name": "air_temperature", + "realm": "atmos", "units": "K", "dimensions": ["time", "lat", "lon"], "modifier" : "atmos_height" }, "zg": { "standard_name": "geopotential_height", + "realm": "atmos", "units": "m", "dimensions": ["time", "lat", "lon"], "scalar_coordinates": {"plev": 500} }, "psl": { "standard_name": "air_pressure_at_mean_sea_level", + "realm": "atmos", "units": "Pa", "dimensions": ["time", "lat", "lon"] } diff --git a/diagnostics/temp_extremes_distshape/temp_extremes_distshape.html b/diagnostics/temp_extremes_distshape/temp_extremes_distshape.html index a1b710b09..fa0ba01ab 100644 --- a/diagnostics/temp_extremes_distshape/temp_extremes_distshape.html +++ b/diagnostics/temp_extremes_distshape/temp_extremes_distshape.html @@ -2,43 +2,63 @@

Example diagnostic: time-averaged near-surface temperature

-The surface temperature extremes and distribution shape package computes statistics that relate to the shape of the two-meter temperature distribution and its influence on extreme temperature exceedances. These metrics evaluate model fidelity in capturing moments of the temperature distribution and distribution tail properties, as well as the large-scale meteorological patterns associated with extreme temperature exceedance days.Frequency of exceedances of a fixed extreme temperature threshold after a uniform warm shift across the distribution, the simplest prototype for future warming, provides a measure of Gaussianity as well as insight into the complexity of future changes in temperature extremes.
+The surface temperature extremes and distribution shape package computes statistics that relate to the shape of the
+ two-meter temperature distribution and its influence on extreme temperature exceedances.
+ These metrics evaluate model fidelity in capturing moments of the temperature distribution and distribution
+ tail properties, as well as the large-scale meteorological patterns associated with extreme temperature exceedance
+ days. Frequency of exceedances of a fixed extreme temperature threshold after a uniform warm shift across the
+ distribution, the simplest prototype for future warming, provides a measure of Gaussianity as well as insight into
+ the complexity of future changes in temperature extremes.
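The shifted-threshold computation described above reduces, per grid cell, to a few lines. What follows is a minimal sketch under our own naming (shifted_exceedance_fraction is not a POD function), assuming anom is a 1-D numpy array of daily two-meter temperature anomalies; 'midpoint' mirrors the POD's percentile choice, and newer numpy spells that keyword 'method':

    import numpy

    def shifted_exceedance_fraction(anom, ptile=95, shift=0.5):
        # Fixed threshold taken from the unshifted distribution
        pthresh = numpy.percentile(anom, ptile, interpolation='midpoint')
        # Uniform shift by `shift` standard deviations: the simplest warming prototype
        shifted = anom + shift * numpy.std(anom, ddof=1)
        # Fraction of days beyond the fixed threshold after the shift
        if ptile < 50:
            return numpy.mean(shifted < pthresh)
        return numpy.mean(shifted > pthresh)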

-The three moments of the temperature distribution characterize its shape by measuring the central tendency, spread, and symmetry. +The three moments of the temperature distribution characterize its shape by measuring the central tendency, spread, + and symmetry.
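For a single anomaly series, the three moments can be sketched as below; distribution_moments is an illustrative helper, not part of the POD (which computes these as maps, cell by cell), with scipy.stats.skew supplying the symmetry measure:

    import numpy
    from scipy import stats

    def distribution_moments(anom):
        # Central tendency, spread, and symmetry of the anomaly distribution
        return {"mean": numpy.mean(anom),
                "std": numpy.std(anom, ddof=1),
                "skewness": stats.skew(anom, bias=False)}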

-The shift ratio identifies regions of non-Gaussianity and regions statistically indistinguishable from Gaussian based on a ratio of distribution tail exceedances following the application of a uniform shift (0.5 sigma). +The shift ratio identifies regions of non-Gaussianity and regions statistically indistinguishable from Gaussian based + on a ratio of distribution tail exceedances following the application of a uniform shift (0.5 sigma).
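The Gaussian baseline behind the ratio is a Monte Carlo experiment mirroring the 10000-repetition loop in TempExtDistShape_ShiftRatio_util.py; a standalone sketch follows, with a seed added for reproducibility (our assumption; the POD does not seed its generator):

    import numpy

    def gaussian_shift_percentiles(ndays, ptile=95, shift=0.5, nreps=10000, seed=0):
        # Repeat the shift experiment on synthetic Gaussian samples of the same length
        rng = numpy.random.default_rng(seed)
        fracs = numpy.empty(nreps)
        for i in range(nreps):
            samp = rng.standard_normal(ndays)
            thresh = numpy.percentile(samp, ptile, interpolation='midpoint')
            shifted = samp + shift * numpy.std(samp, ddof=1)
            fracs[i] = numpy.mean(shifted < thresh) if ptile < 50 else numpy.mean(shifted > thresh)
        # 5th/95th percentiles bound what a Gaussian can produce;
        # the median (50th) is the denominator of the shift ratio
        return numpy.percentile(fracs, [5, 50, 95], interpolation='midpoint')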

-Normalized frequency distributions of two-meter temperature anomalies provide a closer examination of distribution shape at selected locations exhibiting non-Gaussian behavior in the tail in the MERRA-2 distributions. +Normalized frequency distributions of two-meter temperature anomalies provide a closer examination of distribution + shape at selected locations exhibiting non-Gaussian behavior in the tail in the MERRA-2 distributions.
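A hedged sketch of a normalized frequency distribution at one such location, again assuming a 1-D anomaly series; the bin count is arbitrary:

    import numpy

    def normalized_freq_dist(anom, nbins=50):
        # density=True normalizes the histogram to unit area, so distributions
        # from different models and locations are directly comparable
        counts, edges = numpy.histogram(anom, bins=nbins, density=True)
        centers = 0.5 * (edges[:-1] + edges[1:])
        return centers, counts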

-Composites of large-scale meteorological patterns lagged from tail exceedance days help diagnose the physical processes underlying the tail behavior. +Composites of large-scale meteorological patterns lagged from tail exceedance days help diagnose the physical + processes underlying the tail behavior.
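As a rough stand-in for the compositing step (TempExtDistShape_CircComps.py does this with considerably more bookkeeping), one can average a (time, lat, lon) field over days offset by a fixed lag from each tail-exceedance day; lagged_composite and its arguments are hypothetical names:

    import numpy

    def lagged_composite(field, exceed_days, lag=0):
        # Average the field over days offset by `lag` from each exceedance day
        days = numpy.asarray(exceed_days) + lag
        days = days[(days >= 0) & (days < field.shape[0])]  # stay inside the record
        return field[days].mean(axis=0)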

References:

-Catalano, A. J., P. C. Loikith, and J. D. Neelin, 2020: Evaluating CMIP6 Model Fidelity at Simulating Non-Gaussian Temperature Distribution Tails. Environ. Res. Lett., https://doi.org/10.1088/1748-9326/ab8cd0 +Catalano, A. J., P. C. Loikith, and J. D. Neelin, 2020: Evaluating CMIP6 Model Fidelity at Simulating Non-Gaussian + Temperature Distribution Tails. Environ. Res. Lett., + https://doi.org/10.1088/1748-9326/ab8cd0

-Loikith, P. C., and J. D. Neelin, 2015: Short-tailed temperature distributions over North America and implications for future changes in extremes. Geophys. Res. Lett., 42, https://doi.org/10.1002/2015GL065602 +Loikith, P. C., and J. D. Neelin, 2015: Short-tailed temperature distributions over North America and implications + for future changes in extremes. Geophys. Res. Lett., 42, + https://doi.org/10.1002/2015GL065602

-Loikith, P. C., J. D. Neelin, J. Meyerson, and J. S. Hunter, 2018: Short warm-side temperature distribution tails drive hotspots of warm temperature extreme increases under near-future warming. J. Climate, 31, 9469–9487, https://doi.org/10.1175/JCLI-D-17-0878.1 +Loikith, P. C., J. D. Neelin, J. Meyerson, and J. S. Hunter, 2018: Short warm-side temperature distribution tails + drive hotspots of warm temperature extreme increases under near-future warming. J. Climate, 31, 9469–9487, + https://doi.org/10.1175/JCLI-D-17-0878.1

-Loikith, P. C., and J. D. Neelin, 2019: Non-Gaussian cold-side temperature distribution tails and associated synoptic meteorology. J. Climate, 32, 8399–8414, https://doi.org/10.1175/JCLI-D-19-0344.1 +Loikith, P. C., and J. D. Neelin, 2019: Non-Gaussian cold-side temperature distribution tails and associated + synoptic meteorology. J. Climate, 32, 8399–8414, + https://doi.org/10.1175/JCLI-D-19-0344.1

-Ruff, T. W., and J. D. Neelin, 2012: Long tails in regional surface temperature probability distributions with implications for extremes under global warming. Geophys. Res. Lett., 39, L04704, https://doi.org/10.1029/2011GL050610 +Ruff, T. W., and J. D. Neelin, 2012: Long tails in regional surface temperature probability distributions + with implications for extremes under global warming. Geophys. Res. Lett., 39, L04704, + https://doi.org/10.1029/2011GL050610

- diff --git a/diagnostics/temp_extremes_distshape/temp_extremes_distshape.py b/diagnostics/temp_extremes_distshape/temp_extremes_distshape.py index 8ddf6a61d..65b725b33 100644 --- a/diagnostics/temp_extremes_distshape/temp_extremes_distshape.py +++ b/diagnostics/temp_extremes_distshape/temp_extremes_distshape.py @@ -17,7 +17,8 @@ # (3) Frequency Distributions at Non-Gaussian Tail locations (TempExtDistShape_FreqDist.py) # (4) Composite Circulation at Non-Gaussian Tail locations (TempExtDistShape_CircComps.py) # -# As a module of the MDTF code package, all scripts of this package can be found under /diagnostics/temp_extremes_distshape/** +# As a module of the MDTF code package, all scripts of this package can be found under +# /diagnostics/temp_extremes_distshape/** # and observational data under /inputdata/obs_data/temp_extremes_distshape/** # # This package is written in Python 3, and requires the following Python packages: @@ -52,50 +53,57 @@ # Import standard Python packages import os -##### Functionalities in Surface Temperature Extremes and Distribution Shape Package ##### +# ### Functionalities in Surface Temperature Extremes and Distribution Shape Package ##### -## ====================================================================== -## Moments of Surface Temperature Probability Distribution -## See TempExtDistShape_Moments.py for detailed info +# ====================================================================== +# Moments of Surface Temperature Probability Distribution +# See TempExtDistShape_Moments.py for detailed info try: - os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_Moments.py") + os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_Moments.py") except OSError as e: - print(('WARNING',e.errno,e.strerror)) + print(('WARNING', e.errno, e.strerror)) print("**************************************************") - print("Moments of Surface Temperature Probability Distribution (TempExtDistShape_Moments.py) is NOT Executed as Expected!") + print( + "Moments of Surface Temperature Probability Distribution (TempExtDistShape_Moments.py) is NOT Executed" + " as Expected!") print("**************************************************") -## ====================================================================== -## Shifted Underlying-to-Gaussian Distribution Tail Exceedances Ratio -## See TempExtDistShape_ShiftRatio.py for detailed info +# ====================================================================== +# Shifted Underlying-to-Gaussian Distribution Tail Exceedances Ratio +# See TempExtDistShape_ShiftRatio.py for detailed info try: - os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_ShiftRatio.py") + os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_ShiftRatio.py") except OSError as e: - print(('WARNING',e.errno,e.strerror)) + print(('WARNING', e.errno, e.strerror)) print("**************************************************") - print("Shifted Underlying-to-Gaussian Distribution Tail Exceedances ratio (TempExtDistShape_ShiftRatio.py) is NOT Executed as Expected!") + print( + "Shifted Underlying-to-Gaussian Distribution Tail Exceedances ratio (TempExtDistShape_ShiftRatio.py)" + " is NOT Executed as Expected!") print("**************************************************") -## ====================================================================== -## Frequency Distributions at Non-Gaussian Tail Locations -## See TempExtDistShape_FreqDist.py for detailed info +# ====================================================================== 
+# Frequency Distributions at Non-Gaussian Tail Locations +# See TempExtDistShape_FreqDist.py for detailed info try: - os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_FreqDist.py") + os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_FreqDist.py") except OSError as e: - print(('WARNING',e.errno,e.strerror)) + print(('WARNING', e.errno, e.strerror)) print("**************************************************") - print("Frequency Distributions at Non-Gaussian Tail Locations (TempExtDistShape_FreqDist.py) is NOT Executed as Expected!") + print( + "Frequency Distributions at Non-Gaussian Tail Locations (TempExtDistShape_FreqDist.py)" + " is NOT Executed as Expected!") print("**************************************************") -## ====================================================================== -## Composite Circulation at Non-Gaussian Tail Locations -## See TempExtDistShape_CircComps.py for detailed info +# ====================================================================== +# Composite Circulation at Non-Gaussian Tail Locations +# See TempExtDistShape_CircComps.py for detailed info try: - os.system("python "+os.environ["POD_HOME"]+"/TempExtDistShape_CircComps.py") + os.system("python " + os.environ["POD_HOME"] + "/TempExtDistShape_CircComps.py") except OSError as e: - print(('WARNING',e.errno,e.strerror)) + print(('WARNING', e.errno, e.strerror)) print("**************************************************") - print("Composite Circulation at Non-Gaussian Tail Locations (TempExtDistShape_CircComps.py) is NOT Executed as Expected!") + print("Composite Circulation at Non-Gaussian Tail Locations (TempExtDistShape_CircComps.py)" + " is NOT Executed as Expected!") print("**************************************************") print("**************************************************") diff --git a/diagnostics/top_heaviness_metric/settings.jsonc b/diagnostics/top_heaviness_metric/settings.jsonc index cd8b86918..b8b368460 100644 --- a/diagnostics/top_heaviness_metric/settings.jsonc +++ b/diagnostics/top_heaviness_metric/settings.jsonc @@ -2,7 +2,7 @@ "settings": { "driver": "top_heaviness_metric.py", "long_name": "Top Heaviness Metric Diagnostic", - "realm": "atmos", + "convention": "cmip", "description": "The vertical profiles of diabatic heating have important implications for large-scale dynamics, especially for the coupling between the large-scale atmospheric circulation and precipitation processes. We adopt an objective approach to examine the top-heaviness of vertical motion, which is closely related to the heating profiles and a commonly available model output variable. 
The diagnostic/metric can also be used to evaluate the top-heaviness of diabatic heating.",
     "runtime_requirements": {
       "python3": ["netCDF4", "xarray", "numpy", "scipy", "matplotlib", "cartopy"]
@@ -10,10 +10,14 @@
   },
   "dimensions": {
     "lat": {
-      "standard_name": "latitude"
+      "standard_name": "latitude",
+      "units": "degrees_north",
+      "axis": "Y"
     },
     "lon": {
-      "standard_name": "longitude"
+      "standard_name": "longitude",
+      "units": "degrees_east",
+      "axis": "X"
     },
     "lev": {
       "standard_name": "air_pressure",
@@ -25,7 +29,7 @@
   "varlist": {
     "omega": {
       "standard_name": "lagrangian_tendency_of_air_pressure",
-      "path_variable": "PR_FILE",
+      "realm": "atmos",
       "units": "Pa s-1",
       "dimensions": ["lev", "lat", "lon"]
     }
diff --git a/diagnostics/top_heaviness_metric/top_heaviness_metric.py b/diagnostics/top_heaviness_metric/top_heaviness_metric.py
index fd233f01c..d8700d16c 100644
--- a/diagnostics/top_heaviness_metric/top_heaviness_metric.py
+++ b/diagnostics/top_heaviness_metric/top_heaviness_metric.py
@@ -18,7 +18,8 @@
 #
 # ================================================================================
 # Functionality
-# 1) calculate the coefficient of Q1 and Q2 (Q1 ~= idealized deep convection profile; Q2 ~= idealized deep stratiform profile);
+# 1) calculate the coefficient of Q1 and Q2 (Q1 ~= idealized deep convection profile; Q2 ~=
+# idealized deep stratiform profile);
 # 2) calculate top-heaviness ratio (defined as O2/O1)
 #
 # ================================================================================
@@ -43,27 +44,27 @@
 import os
 import glob

-missing_file=0
-if len(glob.glob(os.environ["OMEGA_FILE"]))==0:
+missing_file = 0
+if len(glob.glob(os.environ["OMEGA_FILE"])) == 0:
     print("Required OMEGA data missing!")
-    missing_file=1
+    missing_file = 1

-if missing_file==1:
+if missing_file == 1:
     print("Top-heaviness metric diagnostics Package will NOT be executed!")
 else:
     try:
-        os.system("python3 "+os.environ["POD_HOME"]+"/"+"top_heaviness_ratio_calculation.py")
+        os.system("python3 " + os.environ["POD_HOME"] + "/" + "top_heaviness_ratio_calculation.py")
     except OSError as e:
-        print('WARNING',e.errno,e.strerror)
+        print('WARNING', e.errno, e.strerror)
         print("**************************************************")
         print("Top-Heaviness Metric Diagnostics (top_heaviness_ratio_calculation.py) is NOT Executed as Expected!")
         print("**************************************************")
     # if the user only focuses on calculating top-heaviness ratio instead of applying some tests on
     # ratio robustness, the user can choose not to run the following python file.
try: - os.system("python3 "+os.environ["POD_HOME"]+"/"+"top_heaviness_ratio_robustness_calc.py") + os.system("python3 " + os.environ["POD_HOME"] + "/" + "top_heaviness_ratio_robustness_calc.py") except OSError as e: - print('WARNING',e.errno,e.strerror) + print('WARNING', e.errno, e.strerror) print("**************************************************") print("Top-Heaviness Metric Diagnostics (top_heaviness_ratio_robustness_calc.py) is NOT Executed as Expected!") print("**************************************************") @@ -71,5 +72,3 @@ print("**************************************************") print("Top-Heaviness Metric Diagnostics Executed!") print("**************************************************") - - diff --git a/diagnostics/top_heaviness_metric/top_heaviness_ratio_calculation.py b/diagnostics/top_heaviness_metric/top_heaviness_ratio_calculation.py index 306a00bb7..c99286fbb 100644 --- a/diagnostics/top_heaviness_metric/top_heaviness_ratio_calculation.py +++ b/diagnostics/top_heaviness_metric/top_heaviness_ratio_calculation.py @@ -1,5 +1,5 @@ # 28 June top_heaviness_ratio_calculation.py -#Python packages/ modules imported for the diagnostic +# Python packages/ modules imported for the diagnostic # the sample monthly data is from ERA5 in July from 2000 to 2019 import os import xarray as xr @@ -9,56 +9,56 @@ import cartopy.crs as ccrs import cartopy.mpl.ticker as cticker - -#Setting variables equal to environment variables set by the diagnostic +# Setting variables equal to environment variables set by the diagnostic lat_coord = os.environ["lat_coord"] lon_coord = os.environ["lon_coord"] lev_coord = os.environ["lev_coord"] -omega_var = os.environ["omega_var"] -WK_DIR = os.environ["WK_DIR"] -OBS_DATA = os.environ["OBS_DATA"] -CASENAME = os.environ["CASENAME"] +omega_var = os.environ["omega_var"] +WK_DIR = os.environ["WORK_DIR"] +OBS_DATA = os.environ["OBS_DATA"] +CASENAME = os.environ["CASENAME"] + -#====================== deriving model output ======================= +# ====================== deriving model output ======================= def top_heaviness_ratio_calculation_model(reanalysis_path, reanalysis_var): # read omega and land-sea fraction data - ds= xr.open_dataset(reanalysis_path) - lev_model=ds[lev_coord].values - lat_model=ds[lat_coord].values - lon_model=ds[lon_coord].values - isort=np.argsort(lev_model)[::-1] # descending - mid_omega_model=ds[reanalysis_var].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) - mid_omega_model=mid_omega_model[:,isort] - #======================deriving O1 and O2======================= + ds = xr.open_dataset(reanalysis_path) + lev_model = ds[lev_coord].values + lat_model = ds[lat_coord].values + lon_model = ds[lon_coord].values + isort = np.argsort(lev_model)[::-1] # descending + mid_omega_model = ds[reanalysis_var].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) + mid_omega_model = mid_omega_model[:, isort] + # ======================deriving O1 and O2======================= # construct Q1_model as half a sine wave and Q2_model as a full sine wave # two base functions; Q1: idealized deep convection profile; Q2: Deep stratiform profile (Back et al. 
2017) - Q1_model=np.zeros(len(lev_model)) - Q2_model=np.zeros(len(lev_model)) - dp=lev_model[-1]-lev_model[0] + Q1_model = np.zeros(len(lev_model)) + Q2_model = np.zeros(len(lev_model)) + dp = lev_model[-1] - lev_model[0] for i in range(len(lev_model)): - Q1_model[i]=-np.sin(np.pi*(lev_model[i]-lev_model[0])/dp) - Q2_model[i]=np.sin(2*np.pi*(lev_model[i]-lev_model[0])/dp) - #Normalize - factor=integrate.trapz(Q1_model*Q1_model,lev_model)/dp - Q1_model=Q1_model/np.sqrt(factor) - factor=integrate.trapz(Q2_model*Q2_model,lev_model)/dp - Q2_model=Q2_model/np.sqrt(factor) + Q1_model[i] = -np.sin(np.pi * (lev_model[i] - lev_model[0]) / dp) + Q2_model[i] = np.sin(2 * np.pi * (lev_model[i] - lev_model[0]) / dp) + # Normalize + factor = integrate.trapz(Q1_model * Q1_model, lev_model) / dp + Q1_model = Q1_model / np.sqrt(factor) + factor = integrate.trapz(Q2_model * Q2_model, lev_model) / dp + Q2_model = Q2_model / np.sqrt(factor) # deriving O1 and O2; O1 and O2 are coefs of Q1 and Q2 - mid_omega_model_ltm=np.nanmean(mid_omega_model,axis=0) - O1_model=integrate.trapz(mid_omega_model_ltm*Q1_model[:,None,None],lev_model,axis=0)/dp - O2_model=integrate.trapz(mid_omega_model_ltm*Q2_model[:,None,None],lev_model,axis=0)/dp - #====================== set up figures ======================= - #====================== O1 ======================= - fig, axes = plt.subplots(figsize=(8,4)) - ilat=np.argsort(lat_model) - ilon=np.argsort(lon_model) + mid_omega_model_ltm = np.nanmean(mid_omega_model, axis=0) + O1_model = integrate.trapz(mid_omega_model_ltm * Q1_model[:, None, None], lev_model, axis=0) / dp + O2_model = integrate.trapz(mid_omega_model_ltm * Q2_model[:, None, None], lev_model, axis=0) / dp + # ====================== set up figures ======================= + # ====================== O1 ======================= + fig, axes = plt.subplots(figsize=(8, 4)) + ilat = np.argsort(lat_model) + ilon = np.argsort(lon_model) axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.06,0.07,0.01) - im0 = axes.contourf(lon_model, lat_model, O1_model, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(-0.06, 0.07, 0.01) + im0 = axes.contourf(lon_model, lat_model, O1_model, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_model[ilon][0],lon_model[ilon][-1],60) - lat_grid = np.arange(lat_model[ilat][0],lat_model[ilat][-1],30) + lon_grid = np.arange(lon_model[ilon][0], lon_model[ilon][-1], 60) + lat_grid = np.arange(lat_model[ilat][0], lat_model[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -70,19 +70,19 @@ def top_heaviness_ratio_calculation_model(reanalysis_path, reanalysis_var): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('O1 [Pa/s]',loc='center',fontsize=16) - fig.tight_layout() - fig.savefig(WK_DIR+"/model/"+CASENAME+"_O1.png", format='png',bbox_inches='tight') - #====================== O2 ======================= - fig, axes = plt.subplots(figsize=(8,4)) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('O1 [Pa/s]', loc='center', fontsize=16) + fig.tight_layout() + fig.savefig(WK_DIR + "/model/" + CASENAME + "_O1.png", format='png', bbox_inches='tight') + # 
====================== O2 ======================= + fig, axes = plt.subplots(figsize=(8, 4)) axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.06,0.07,0.01) - im0 = axes.contourf(lon_model, lat_model, O2_model, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(-0.06, 0.07, 0.01) + im0 = axes.contourf(lon_model, lat_model, O2_model, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_model[ilon][0],lon_model[ilon][-1],60) - lat_grid = np.arange(lat_model[ilat][0],lat_model[ilat][-1],30) + lon_grid = np.arange(lon_model[ilon][0], lon_model[ilon][-1], 60) + lat_grid = np.arange(lat_model[ilat][0], lat_model[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -94,22 +94,22 @@ def top_heaviness_ratio_calculation_model(reanalysis_path, reanalysis_var): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('O2 [Pa/s]',loc='center',fontsize=16) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('O2 [Pa/s]', loc='center', fontsize=16) fig.tight_layout() - fig.savefig(WK_DIR+"/model/"+CASENAME+"_O2.png", format='png',bbox_inches='tight') - #====================== O2/O1 top-heaviness ratio ======================= - fig, axes = plt.subplots(figsize=(8,4)) - mmid1=O2_model/O1_model - midi=O1_model<0.01 # We only investigate areas with O1 larger than zero - mmid1[midi]=np.nan + fig.savefig(WK_DIR + "/model/" + CASENAME + "_O2.png", format='png', bbox_inches='tight') + # ====================== O2/O1 top-heaviness ratio ======================= + fig, axes = plt.subplots(figsize=(8, 4)) + mmid1 = O2_model / O1_model + midi = O1_model < 0.01 # We only investigate areas with O1 larger than zero + mmid1[midi] = np.nan axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.6,0.7,0.1) - im0 = axes.contourf(lon_model, lat_model, mmid1, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(-0.6, 0.7, 0.1) + im0 = axes.contourf(lon_model, lat_model, mmid1, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_model[ilon][0],lon_model[ilon][-1],60) - lat_grid = np.arange(lat_model[ilat][0],lat_model[ilat][-1],30) + lon_grid = np.arange(lon_model[ilon][0], lon_model[ilon][-1], 60) + lat_grid = np.arange(lat_model[ilat][0], lat_model[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -121,59 +121,58 @@ def top_heaviness_ratio_calculation_model(reanalysis_path, reanalysis_var): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('Top-heaviness Ratio (O2/O1)',loc='center',fontsize=18) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('Top-heaviness Ratio (O2/O1)', loc='center', fontsize=18) fig.tight_layout() - fig.savefig(WK_DIR+"/model/"+CASENAME+"_Top_Heaviness_Ratio.png", format='png',bbox_inches='tight') + fig.savefig(WK_DIR 
+ "/model/" + CASENAME + "_Top_Heaviness_Ratio.png", format='png', bbox_inches='tight') print("Plotting Completed") - -top_heaviness_ratio_calculation_model(os.environ["OMEGA_FILE"],os.environ["omega_var"]) +top_heaviness_ratio_calculation_model(os.environ["OMEGA_FILE"], os.environ["omega_var"]) -#====================== deriving obs output ======================= +# ====================== deriving obs output ======================= # run obs data (ERA5 2000-2019 July only) def top_heaviness_ratio_calculation_obs(obs_data_full_dir): # read omega - ds= xr.open_dataset(obs_data_full_dir) - lev_obs=ds['lev'].values - lat_obs=ds['lat'].values - lon_obs=ds['lon'].values - isort=np.argsort(lev_obs)[::-1] # descending - mid_omega_obs=ds['omega'].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) - mid_omega_obs=mid_omega_obs[:,isort] - #======================deriving O1 and O2======================= + ds = xr.open_dataset(obs_data_full_dir) + lev_obs = ds['lev'].values + lat_obs = ds['lat'].values + lon_obs = ds['lon'].values + isort = np.argsort(lev_obs)[::-1] # descending + mid_omega_obs = ds['omega'].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) + mid_omega_obs = mid_omega_obs[:, isort] + # ======================deriving O1 and O2======================= # construct Q1_obs as half a sine wave and Q2_obs as a full sine wave # two base functions; Q1: idealized deep convection profile; Q2: Deep stratiform profile (Back et al. 2017) - Q1_obs=np.zeros(len(lev_obs)) - Q2_obs=np.zeros(len(lev_obs)) - dp=lev_obs[-1]-lev_obs[0] + Q1_obs = np.zeros(len(lev_obs)) + Q2_obs = np.zeros(len(lev_obs)) + dp = lev_obs[-1] - lev_obs[0] for i in range(len(lev_obs)): - Q1_obs[i]=-np.sin(np.pi*(lev_obs[i]-lev_obs[0])/dp) - Q2_obs[i]=np.sin(2*np.pi*(lev_obs[i]-lev_obs[0])/dp) - #Normalize - factor=integrate.trapz(Q1_obs*Q1_obs,lev_obs)/dp - Q1_obs=Q1_obs/np.sqrt(factor) - factor=integrate.trapz(Q2_obs*Q2_obs,lev_obs)/dp - Q2_obs=Q2_obs/np.sqrt(factor) + Q1_obs[i] = -np.sin(np.pi * (lev_obs[i] - lev_obs[0]) / dp) + Q2_obs[i] = np.sin(2 * np.pi * (lev_obs[i] - lev_obs[0]) / dp) + # Normalize + factor = integrate.trapz(Q1_obs * Q1_obs, lev_obs) / dp + Q1_obs = Q1_obs / np.sqrt(factor) + factor = integrate.trapz(Q2_obs * Q2_obs, lev_obs) / dp + Q2_obs = Q2_obs / np.sqrt(factor) # deriving O1 and O2; O1 and O2 are coefs of Q1 and Q2 - mid_omega_obs_ltm=np.nanmean(mid_omega_obs,axis=0) - O1_obs=integrate.trapz(mid_omega_obs_ltm*Q1_obs[:,None,None],lev_obs,axis=0)/dp - O2_obs=integrate.trapz(mid_omega_obs_ltm*Q2_obs[:,None,None],lev_obs,axis=0)/dp - #====================== set up figures ======================= - #====================== O1 ======================= - fig, axes = plt.subplots(figsize=(8,4)) - ilat=np.argsort(lat_obs) - ilon=np.argsort(lon_obs) + mid_omega_obs_ltm = np.nanmean(mid_omega_obs, axis=0) + O1_obs = integrate.trapz(mid_omega_obs_ltm * Q1_obs[:, None, None], lev_obs, axis=0) / dp + O2_obs = integrate.trapz(mid_omega_obs_ltm * Q2_obs[:, None, None], lev_obs, axis=0) / dp + # ====================== set up figures ======================= + # ====================== O1 ======================= + fig, axes = plt.subplots(figsize=(8, 4)) + ilat = np.argsort(lat_obs) + ilon = np.argsort(lon_obs) axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.06,0.07,0.01) - im0 = axes.contourf(lon_obs, lat_obs, O1_obs, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(-0.06, 0.07, 
0.01) + im0 = axes.contourf(lon_obs, lat_obs, O1_obs, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_obs[ilon][0],lon_obs[ilon][-1],60) - lat_grid = np.arange(lat_obs[ilat][0],lat_obs[ilat][-1],30) + lon_grid = np.arange(lon_obs[ilon][0], lon_obs[ilon][-1], 60) + lat_grid = np.arange(lat_obs[ilat][0], lat_obs[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -185,19 +184,19 @@ def top_heaviness_ratio_calculation_obs(obs_data_full_dir): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('O1 [Pa/s]',loc='center',fontsize=16) - fig.tight_layout() - fig.savefig(WK_DIR+"/obs/ERA5_O1_2000_2019_July.png", format='png',bbox_inches='tight') - #====================== O2 ======================= - fig, axes = plt.subplots(figsize=(8,4)) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('O1 [Pa/s]', loc='center', fontsize=16) + fig.tight_layout() + fig.savefig(WK_DIR + "/obs/ERA5_O1_2000_2019_July.png", format='png', bbox_inches='tight') + # ====================== O2 ======================= + fig, axes = plt.subplots(figsize=(8, 4)) axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.06,0.07,0.01) - im0 = axes.contourf(lon_obs, lat_obs, O2_obs, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(-0.06, 0.07, 0.01) + im0 = axes.contourf(lon_obs, lat_obs, O2_obs, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_obs[ilon][0],lon_obs[ilon][-1],60) - lat_grid = np.arange(lat_obs[ilat][0],lat_obs[ilat][-1],30) + lon_grid = np.arange(lon_obs[ilon][0], lon_obs[ilon][-1], 60) + lat_grid = np.arange(lat_obs[ilat][0], lat_obs[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -209,22 +208,22 @@ def top_heaviness_ratio_calculation_obs(obs_data_full_dir): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('O2 [Pa/s]',loc='center',fontsize=16) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('O2 [Pa/s]', loc='center', fontsize=16) fig.tight_layout() - fig.savefig(WK_DIR+"/obs/ERA5_O2_2000_2019_July.png", format='png',bbox_inches='tight') - #====================== O2/O1 top-heaviness ratio ======================= - fig, axes = plt.subplots(figsize=(8,4)) - mmid1=O2_obs/O1_obs - midi=O1_obs<0.01 # We only investigate areas with O1 larger than zero - mmid1[midi]=np.nan + fig.savefig(WK_DIR + "/obs/ERA5_O2_2000_2019_July.png", format='png', bbox_inches='tight') + # ====================== O2/O1 top-heaviness ratio ======================= + fig, axes = plt.subplots(figsize=(8, 4)) + mmid1 = O2_obs / O1_obs + midi = O1_obs < 0.01 # We only investigate areas with O1 larger than zero + mmid1[midi] = np.nan axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(-0.6,0.7,0.1) - im0 = axes.contourf(lon_obs, lat_obs, mmid1, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - 
transform=ccrs.PlateCarree()) + clevs = np.arange(-0.6, 0.7, 0.1) + im0 = axes.contourf(lon_obs, lat_obs, mmid1, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_obs[ilon][0],lon_obs[ilon][-1],60) - lat_grid = np.arange(lat_obs[ilat][0],lat_obs[ilat][-1],30) + lon_grid = np.arange(lon_obs[ilon][0], lon_obs[ilon][-1], 60) + lat_grid = np.arange(lat_obs[ilat][0], lat_obs[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -236,11 +235,11 @@ def top_heaviness_ratio_calculation_obs(obs_data_full_dir): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('Top-heaviness Ratio (O2/O1)',loc='center',fontsize=18) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('Top-heaviness Ratio (O2/O1)', loc='center', fontsize=18) fig.tight_layout() - fig.savefig(WK_DIR+"/obs/ERA5_Top_Heaviness_Ratio_2000_2019_July.png", format='png',bbox_inches='tight') + fig.savefig(WK_DIR + "/obs/ERA5_Top_Heaviness_Ratio_2000_2019_July.png", format='png', bbox_inches='tight') print("Plotting Completed") -top_heaviness_ratio_calculation_obs(OBS_DATA+'/ERA5_omega_mon_2000_2019_July.nc') +top_heaviness_ratio_calculation_obs(OBS_DATA + '/ERA5_omega_mon_2000_2019_July.nc') diff --git a/diagnostics/top_heaviness_metric/top_heaviness_ratio_robustness_calc.py b/diagnostics/top_heaviness_metric/top_heaviness_ratio_robustness_calc.py index a8e12e783..c9766d375 100644 --- a/diagnostics/top_heaviness_metric/top_heaviness_ratio_robustness_calc.py +++ b/diagnostics/top_heaviness_metric/top_heaviness_ratio_robustness_calc.py @@ -2,92 +2,94 @@ import os import xarray as xr import numpy as np -import scipy +import scipy from scipy import interpolate from scipy import integrate import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.mpl.ticker as cticker - -#Setting variables equal to environment variables set by the diagnostic +# Setting variables equal to environment variables set by the diagnostic time_coord = os.environ["time_coord"] lat_coord = os.environ["lat_coord"] lon_coord = os.environ["lon_coord"] lev_coord = os.environ["lev_coord"] -omega_var = os.environ["omega_var"] -WK_DIR = os.environ["WK_DIR"] -OBS_DATA = os.environ["OBS_DATA"] -CASENAME = os.environ["CASENAME"] +omega_var = os.environ["omega_var"] +WK_DIR = os.environ["WORK_DIR"] +OBS_DATA = os.environ["OBS_DATA"] +CASENAME = os.environ["CASENAME"] + -#from numba import jit -#@jit(nopython=True) -def corr2d(a1,a2): - ij=np.shape(a1[:]) - mid_corr=np.zeros(ij[1:]) +# from numba import jit +# @jit(nopython=True) +def corr2d(a1, a2): + ij = np.shape(a1[:]) + mid_corr = np.zeros(ij[1:]) for i in np.arange(ij[1]): for j in np.arange(ij[2]): - mid_corr[i,j]=np.corrcoef(a1[:,i,j],a2[:,i,j])[0,1] + mid_corr[i, j] = np.corrcoef(a1[:, i, j], a2[:, i, j])[0, 1] return mid_corr -#====================== deriving model output ======================= + +# ====================== deriving model output ======================= def top_heaviness_ratio_robustness_calc_model(reanalysis_path, reanalysis_var): # read omega and land-sea fraction data - ds= xr.open_dataset(reanalysis_path) - lev_model=ds[lev_coord].values - lat_model=ds[lat_coord].values - lon_model=ds[lon_coord].values - 
isort=np.argsort(lev_model)[::-1] # descending - mid_omega_model=ds[reanalysis_var].values # mon x lev x lat x lon; for the sample data (JAS over 2000-2019) - mid_omega_model=mid_omega_model[:,isort] - mid_omega_model_ltm=np.nanmean(mid_omega_model,axis=0) # lev x lat x lon - #======================Interpolation======================= - dp=lev_model[-1]-lev_model[0] - levs_interp=np.linspace(lev_model[0], lev_model[-1], num=len(lev_model)) - f=interpolate.interp1d(lev_model, mid_omega_model_ltm, kind='cubic', axis=0) # you can choose linear which consumes less time - mid_omega_model_ltm=f(levs_interp) - #======================deriving O1 and O2======================= + ds = xr.open_dataset(reanalysis_path) + lev_model = ds[lev_coord].values + lat_model = ds[lat_coord].values + lon_model = ds[lon_coord].values + isort = np.argsort(lev_model)[::-1] # descending + mid_omega_model = ds[reanalysis_var].values # mon x lev x lat x lon; for the sample data (JAS over 2000-2019) + mid_omega_model = mid_omega_model[:, isort] + mid_omega_model_ltm = np.nanmean(mid_omega_model, axis=0) # lev x lat x lon + # ======================Interpolation======================= + dp = lev_model[-1] - lev_model[0] + levs_interp = np.linspace(lev_model[0], lev_model[-1], num=len(lev_model)) + f = interpolate.interp1d(lev_model, mid_omega_model_ltm, kind='cubic', + axis=0) # you can choose linear which consumes less time + mid_omega_model_ltm = f(levs_interp) + # ======================deriving O1 and O2======================= # construct Q1_model as half a sine wave and Q2_model as a full sine wave # two base functions; Q1: idealized deep convection profile; Q2: Deep stratiform profile (Back et al. 2017) - Q1_model=np.zeros(len(levs_interp)) - Q2_model=np.zeros(len(levs_interp)) + Q1_model = np.zeros(len(levs_interp)) + Q2_model = np.zeros(len(levs_interp)) for i in range(len(levs_interp)): - Q1_model[i]=-np.sin(np.pi*(levs_interp[i]-levs_interp[0])/dp) - Q2_model[i]=np.sin(2*np.pi*(levs_interp[i]-levs_interp[0])/dp) - #Normalize - factor=integrate.trapz(Q1_model*Q1_model,lev_model)/dp - Q1_model=Q1_model/np.sqrt(factor) - factor=integrate.trapz(Q2_model*Q2_model,lev_model)/dp - Q2_model=Q2_model/np.sqrt(factor) - #======================calculate explained variance over the globe======================= + Q1_model[i] = -np.sin(np.pi * (levs_interp[i] - levs_interp[0]) / dp) + Q2_model[i] = np.sin(2 * np.pi * (levs_interp[i] - levs_interp[0]) / dp) + # Normalize + factor = integrate.trapz(Q1_model * Q1_model, lev_model) / dp + Q1_model = Q1_model / np.sqrt(factor) + factor = integrate.trapz(Q2_model * Q2_model, lev_model) / dp + Q2_model = Q2_model / np.sqrt(factor) + # ======================calculate explained variance over the globe======================= # Often times, the pres level is not equally distributed. People tend to increase density in the # ... upper and lower atmopshere, leaving a less dense distribution in the mid atmosphere. # ... 
Thus, it is important to weight Q1 and Q2 before we calculate R2 # Remember, we already take weights into account when calculating O1 and O2 - mid_Q1_model=Q1_model/np.sqrt(np.sum(Q1_model**2)) # unitize Q1 - mid_Q2_model=Q2_model/np.sqrt(np.sum(Q2_model**2)) # unitize Q2 + mid_Q1_model = Q1_model / np.sqrt(np.sum(Q1_model ** 2)) # unitize Q1 + mid_Q2_model = Q2_model / np.sqrt(np.sum(Q2_model ** 2)) # unitize Q2 # calc ltm O1 and O2, because calculating correlation is not a linear operation - OQ1_model_ltm=np.nansum(mid_Q1_model[:,None,None]*mid_omega_model_ltm,axis=0) - OQ2_model_ltm=np.nansum(mid_Q2_model[:,None,None]*mid_omega_model_ltm,axis=0) - OQQ1_model=OQ1_model_ltm[None,:,:]*mid_Q1_model[:,None,None] # reconstruct Q1 field - OQQ2_model=OQ2_model_ltm[None,:,:]*mid_Q2_model[:,None,None] # reconstruct Q2 field - OQQ_sum=OQQ1_model+OQQ2_model - corr2d_model=corr2d(mid_omega_model_ltm,OQQ_sum) - R2_model=corr2d_model**2 - #====================== setting up figures ======================= - ilat=np.argsort(lat_model) - ilon=np.argsort(lon_model) - #====================== R2 ======================= + OQ1_model_ltm = np.nansum(mid_Q1_model[:, None, None] * mid_omega_model_ltm, axis=0) + OQ2_model_ltm = np.nansum(mid_Q2_model[:, None, None] * mid_omega_model_ltm, axis=0) + OQQ1_model = OQ1_model_ltm[None, :, :] * mid_Q1_model[:, None, None] # reconstruct Q1 field + OQQ2_model = OQ2_model_ltm[None, :, :] * mid_Q2_model[:, None, None] # reconstruct Q2 field + OQQ_sum = OQQ1_model + OQQ2_model + corr2d_model = corr2d(mid_omega_model_ltm, OQQ_sum) + R2_model = corr2d_model ** 2 + # ====================== setting up figures ======================= + ilat = np.argsort(lat_model) + ilon = np.argsort(lon_model) + # ====================== R2 ======================= # R2 measures the proportion of ltm omega profile explained by Q1 and Q2 - fig, axes = plt.subplots(figsize=(8,4)) - mmid=R2_model + fig, axes = plt.subplots(figsize=(8, 4)) + mmid = R2_model axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(0,1.,0.1) - im0 = axes.contourf(lon_model, lat_model, mmid, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(0, 1., 0.1) + im0 = axes.contourf(lon_model, lat_model, mmid, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_model[ilon][0],lon_model[ilon][-1],60) - lat_grid = np.arange(lat_model[ilat][0],lat_model[ilat][-1],30) + lon_grid = np.arange(lon_model[ilon][0], lon_model[ilon][-1], 60) + lat_grid = np.arange(lat_model[ilat][0], lat_model[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -99,73 +101,75 @@ def top_heaviness_ratio_robustness_calc_model(reanalysis_path, reanalysis_var): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('$R^{2}$ Between Recon. Omega & Original',loc='center',fontsize=18) - fig.savefig(WK_DIR+"/model/"+CASENAME+"_R2.png", format='png',bbox_inches='tight') + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('$R^{2}$ Between Recon. 
Omega & Original', loc='center', fontsize=18) + fig.savefig(WK_DIR + "/model/" + CASENAME + "_R2.png", format='png', bbox_inches='tight') print("Plotting Completed") - -top_heaviness_ratio_robustness_calc_model(os.environ["OMEGA_FILE"],os.environ["omega_var"]) -#====================== deriving obs output ======================= +top_heaviness_ratio_robustness_calc_model(os.environ["OMEGA_FILE"], os.environ["omega_var"]) + + +# ====================== deriving obs output ======================= def top_heaviness_ratio_robustness_calc_obs(obs_data_full_dir): # read omega - ds= xr.open_dataset(obs_data_full_dir) - lev_obs=ds['lev'].values - lat_obs=ds['lat'].values - lon_obs=ds['lon'].values - isort=np.argsort(lev_obs)[::-1] # descending - mid_omega_obs=ds['omega'].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) - mid_omega_obs=mid_omega_obs[:,isort] - mid_omega_obs_ltm=np.nanmean(mid_omega_obs,axis=0) # lev x lat x lon - #======================Interpolation======================= - dp=lev_obs[-1]-lev_obs[0] - levs_interp=np.linspace(lev_obs[0], lev_obs[-1], num=len(lev_obs)) - f=interpolate.interp1d(lev_obs, mid_omega_obs_ltm, kind='cubic', axis=0) # you can choose linear which consumes less time - mid_omega_obs_ltm=f(levs_interp) - #======================deriving O1 and O2======================= + ds = xr.open_dataset(obs_data_full_dir) + lev_obs = ds['lev'].values + lat_obs = ds['lat'].values + lon_obs = ds['lon'].values + isort = np.argsort(lev_obs)[::-1] # descending + mid_omega_obs = ds['omega'].values # mon x lev x lat x lon; for the sample data (July over 2000-2019) + mid_omega_obs = mid_omega_obs[:, isort] + mid_omega_obs_ltm = np.nanmean(mid_omega_obs, axis=0) # lev x lat x lon + # ======================Interpolation======================= + dp = lev_obs[-1] - lev_obs[0] + levs_interp = np.linspace(lev_obs[0], lev_obs[-1], num=len(lev_obs)) + f = interpolate.interp1d(lev_obs, mid_omega_obs_ltm, kind='cubic', + axis=0) # you can choose linear which consumes less time + mid_omega_obs_ltm = f(levs_interp) + # ======================deriving O1 and O2======================= # construct Q1_obs as half a sine wave and Q2_obs as a full sine wave # two base functions; Q1: idealized deep convection profile; Q2: Deep stratiform profile (Back et al. 2017) - Q1_obs=np.zeros(len(levs_interp)) - Q2_obs=np.zeros(len(levs_interp)) + Q1_obs = np.zeros(len(levs_interp)) + Q2_obs = np.zeros(len(levs_interp)) for i in range(len(levs_interp)): - Q1_obs[i]=-np.sin(np.pi*(levs_interp[i]-levs_interp[0])/dp) - Q2_obs[i]=np.sin(2*np.pi*(levs_interp[i]-levs_interp[0])/dp) - #Normalize - factor=scipy.integrate.trapz(Q1_obs*Q1_obs,lev_obs)/dp - Q1_obs=Q1_obs/np.sqrt(factor) - factor=scipy.integrate.trapz(Q2_obs*Q2_obs,lev_obs)/dp - Q2_obs=Q2_obs/np.sqrt(factor) - #======================calculate explained variance over the globe======================= + Q1_obs[i] = -np.sin(np.pi * (levs_interp[i] - levs_interp[0]) / dp) + Q2_obs[i] = np.sin(2 * np.pi * (levs_interp[i] - levs_interp[0]) / dp) + # Normalize + factor = scipy.integrate.trapz(Q1_obs * Q1_obs, lev_obs) / dp + Q1_obs = Q1_obs / np.sqrt(factor) + factor = scipy.integrate.trapz(Q2_obs * Q2_obs, lev_obs) / dp + Q2_obs = Q2_obs / np.sqrt(factor) + # ======================calculate explained variance over the globe======================= # Often times, the pres level is not equally distributed. People tend to increase density in the # ... upper and lower atmopshere, leaving a less dense distribution in the mid atmosphere. # ... 
Thus, it is important to weight Q1 and Q2 before we calculate explained variance # Remember, we already take weights into account when calculating O1 and O2 - mid_Q1_obs=Q1_obs/np.sqrt(np.sum(Q1_obs**2)) # unitize Q1 - mid_Q2_obs=Q2_obs/np.sqrt(np.sum(Q2_obs**2)) # unitize Q2 + mid_Q1_obs = Q1_obs / np.sqrt(np.sum(Q1_obs ** 2)) # unitize Q1 + mid_Q2_obs = Q2_obs / np.sqrt(np.sum(Q2_obs ** 2)) # unitize Q2 # calc ltm O1 and O2, because calculating correlation is not a linear operation - OQ1_obs_ltm=np.nansum(mid_Q1_obs[:,None,None]*mid_omega_obs_ltm,axis=0) - OQ2_obs_ltm=np.nansum(mid_Q2_obs[:,None,None]*mid_omega_obs_ltm,axis=0) - OQQ1_obs=OQ1_obs_ltm[None,:,:]*mid_Q1_obs[:,None,None] # reconstruct Q1 field - OQQ2_obs=OQ2_obs_ltm[None,:,:]*mid_Q2_obs[:,None,None] # reconstruct Q2 field - OQQ_sum=OQQ1_obs+OQQ2_obs - corr2d_obs=corr2d(mid_omega_obs_ltm,OQQ_sum) - R2_obs=corr2d_obs**2 - #====================== setting up figures ======================= - ilat=np.argsort(lat_obs) - ilon=np.argsort(lon_obs) - #====================== R2 ======================= + OQ1_obs_ltm = np.nansum(mid_Q1_obs[:, None, None] * mid_omega_obs_ltm, axis=0) + OQ2_obs_ltm = np.nansum(mid_Q2_obs[:, None, None] * mid_omega_obs_ltm, axis=0) + OQQ1_obs = OQ1_obs_ltm[None, :, :] * mid_Q1_obs[:, None, None] # reconstruct Q1 field + OQQ2_obs = OQ2_obs_ltm[None, :, :] * mid_Q2_obs[:, None, None] # reconstruct Q2 field + OQQ_sum = OQQ1_obs + OQQ2_obs + corr2d_obs = corr2d(mid_omega_obs_ltm, OQQ_sum) + R2_obs = corr2d_obs ** 2 + # ====================== setting up figures ======================= + ilat = np.argsort(lat_obs) + ilon = np.argsort(lon_obs) + # ====================== R2 ======================= # R2 measures the proportion of ltm omega profile explained by Q1 and Q2 - fig, axes = plt.subplots(figsize=(8,4)) - mmid=R2_obs + fig, axes = plt.subplots(figsize=(8, 4)) + mmid = R2_obs axes = plt.axes(projection=ccrs.PlateCarree(central_longitude=180)) - clevs=np.arange(0,1.,0.1) - im0 = axes.contourf(lon_obs, lat_obs, mmid, clevs,cmap = plt.get_cmap('RdBu_r'),extend='both', - transform=ccrs.PlateCarree()) + clevs = np.arange(0, 1., 0.1) + im0 = axes.contourf(lon_obs, lat_obs, mmid, clevs, cmap=plt.get_cmap('RdBu_r'), extend='both', + transform=ccrs.PlateCarree()) axes.coastlines() - lon_grid = np.arange(lon_obs[ilon][0],lon_obs[ilon][-1],60) - lat_grid = np.arange(lat_obs[ilat][0],lat_obs[ilat][-1],30) + lon_grid = np.arange(lon_obs[ilon][0], lon_obs[ilon][-1], 60) + lat_grid = np.arange(lat_obs[ilat][0], lat_obs[ilat][-1], 30) # set x labels axes.set_xticks(lon_grid, crs=ccrs.PlateCarree()) axes.set_xticklabels(lon_grid, rotation=0, fontsize=14) @@ -177,22 +181,13 @@ def top_heaviness_ratio_robustness_calc_obs(obs_data_full_dir): lat_formatter = cticker.LatitudeFormatter() axes.yaxis.set_major_formatter(lat_formatter) # colorbar - fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15,shrink=.9,aspect=45) - axes.set_title('$R^{2}$ Between Recon. Omega & Original',loc='center',fontsize=18) + fig.colorbar(im0, ax=axes, orientation="horizontal", pad=0.15, shrink=.9, aspect=45) + axes.set_title('$R^{2}$ Between Recon. 
Omega & Original', loc='center', fontsize=18) fig.tight_layout() - fig.savefig(WK_DIR+"/obs/ERA5_R2_Between_Recon_Omega&Original_2000_2019_July.png", format='png',bbox_inches='tight') + fig.savefig(WK_DIR + "/obs/ERA5_R2_Between_Recon_Omega&Original_2000_2019_July.png", format='png', + bbox_inches='tight') print("Plotting Completed") - - -top_heaviness_ratio_robustness_calc_obs(OBS_DATA+'/ERA5_omega_mon_2000_2019_July.nc') - - - - - - - - +top_heaviness_ratio_robustness_calc_obs(OBS_DATA + '/ERA5_omega_mon_2000_2019_July.nc') diff --git a/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/io_cmems_adt.py b/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/io_cmems_adt.py index 8e281bd06..f115afb4b 100644 --- a/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/io_cmems_adt.py +++ b/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/io_cmems_adt.py @@ -40,7 +40,6 @@ def annual_to_monthly(basedir, year): print(path) # obtain a list of files for each year - #files = list(scantree(path)) files = [x.path for x in list(scantree(path)) if x.path.endswith(".nc")] # open a multi-file dataset and extract the variable diff --git a/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/xr_ufunc.py b/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/xr_ufunc.py index fa9527dc3..02b3ddb9e 100644 --- a/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/xr_ufunc.py +++ b/diagnostics/tropical_pacific_sea_level/obs_data_preprocess/xr_ufunc.py @@ -21,7 +21,7 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_conf (xr.DataArray) - a 2 dimension gridded data representing the confidence interval of the linear trend """ - if skipna == True: + if skipna: # make sure the order of the dataarray is correct da_data = da_data.transpose(tname,yname,xname) @@ -34,7 +34,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_r_value = da_slope.copy() * np.nan da_p_value = da_slope.copy() * np.nan da_std_err = da_slope.copy() * np.nan - da_conf = da_slope.copy() * np.nan for xx in range(nx): for yy in range(ny): @@ -42,7 +41,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_data[:, yy, xx].notnull(), drop=True ) if len(da_ts) > 0: - da_time = da_ts.time.copy() year = da_ts["time.year"].values month = da_ts["time.month"].values @@ -59,7 +57,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_std_err[yy, xx] = std_err else: - da_time = da_data.time.copy() year = da_data["time.year"].values month = da_data["time.month"].values @@ -82,7 +79,7 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn # vectorize=True,dask='parallelized', # dask_gufunc_kwargs={"allow_rechunk":True}) - ### calculate confidence interval + # calculate confidence interval # calculate the error bar base on the number of standard error # the number related to dist. 
percentage is derived base on Students's T # distribution diff --git a/diagnostics/tropical_pacific_sea_level/settings.jsonc b/diagnostics/tropical_pacific_sea_level/settings.jsonc index dbf650912..726fad155 100644 --- a/diagnostics/tropical_pacific_sea_level/settings.jsonc +++ b/diagnostics/tropical_pacific_sea_level/settings.jsonc @@ -2,7 +2,7 @@ "settings" : { "driver" : "tropical_pacific_sea_level.py", "long_name" : "tropical pacific sea level", - "realm" : "ocean", + "convention": "cmip", "description" : "Tropical pacific sea level diagnostic", "runtime_requirements": { "python3" : ["matplotlib","xarray","cartopy","cftime","numpy","dask"] @@ -28,10 +28,14 @@ "dimensions": { "lat": { - "standard_name": "latitude" + "standard_name": "latitude", + "units": "degrees_north", + "axis": "Y" }, "lon": { - "standard_name": "longitude" + "standard_name": "longitude", + "units": "degrees_east", + "axis": "X" }, "time": { "standard_name": "time" @@ -41,24 +45,28 @@ "varlist" : { "zos": { "standard_name": "sea_surface_height_above_geoid", + "realm": "ocean", "units": "m", "dimensions" : ["time", "lat", "lon"] }, "tauuo": { "standard_name": "downward_x_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "dimensions" : ["time", "lat", "lon"] }, "tauvo": { "standard_name": "downward_y_stress_at_sea_water_surface", + "realm": "ocean", "units": "N m-2", "dimensions" : ["time", "lat", "lon"] }, "areacello": { "standard_name": "cell_area", + "realm": "ocean", "units": "m2", "modifier" : "ocean_realm", "dimensions" : ["lat", "lon"] diff --git a/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.html b/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.html index 2e6cb2267..24a0dd6d7 100644 --- a/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.html +++ b/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.html @@ -24,7 +24,7 @@

Example diagnostic: time scale diagnostic of wind stress and sea level

-<font color=navy> Surface Temperature Extremes and Distribution Shape Diagnostics +<font color=navy> Surface Temperature Extremes and Distribution Shape Diagnostics {{CASENAME}} OBS
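The driver-script diff that follows renames several framework environment variables. A minimal sketch of the new pattern — only the variable names (startdate, enddate, WORK_DIR) and their old equivalents are taken from the patch below; the surrounding scaffolding is illustrative:

```python
# Sketch of the renamed framework environment variables used below.
# Only the names come from the patch; this scaffolding is illustrative
# and assumes the framework has already exported these variables.
import os

syear = int(os.getenv("startdate"))   # formerly os.getenv("FIRSTYR")
fyear = int(os.getenv("enddate"))     # formerly os.getenv("LASTYR")
work_dir = os.getenv("WORK_DIR")      # formerly os.getenv("WK_DIR")

print(f"cropping model data to {syear}-{fyear}; writing output under {work_dir}")
```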
- diff --git a/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.py b/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.py index c02a30454..0cb4f7549 100644 --- a/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.py +++ b/diagnostics/tropical_pacific_sea_level/tropical_pacific_sea_level.py @@ -55,15 +55,11 @@ import numpy as np import matplotlib.pyplot as plt - import spherical_area as sa from xr_ufunc import da_linregress from dynamical_balance2 import curl_tau, curl_tau_3d from dynamical_balance2 import curl_var, curl_var_3d - - - warnings.simplefilter("ignore") print("--------------------------") @@ -71,8 +67,8 @@ print("--------------------------") # constant setting -syear = int(os.getenv("FIRSTYR")) # crop model data from year -fyear = int(os.getenv("LASTYR")) # crop model data to year +syear = int(os.getenv("startdate")) # crop model data from year +fyear = int(os.getenv("enddate")) # crop model data to year predef_obs = os.getenv("predef_obs") obs_start_year = int(os.getenv("obs_start_year")) # crop obs data from year obs_end_year = int(os.getenv("obs_end_year")) # crop obs data to year @@ -98,7 +94,6 @@ ] areafile = os.getenv("AREACELLO_FILE") - Model_varname = [os.getenv("tauuo_var"), os.getenv("tauvo_var"), os.getenv("zos_var")] Model_coordname = [os.getenv("lat_coord_name"), os.getenv("lon_coord_name")] @@ -108,18 +103,16 @@ print("Ocean model axis name:", xname, yname) print("Ocean model coord name:", Model_coordname[1], Model_coordname[0]) - print("--------------------------") print("Start processing model outputs") print("--------------------------") - ds_model_mlist = {} mean_mlist = {} season_mlist = {} linear_mlist = {} -#### models +# models for nmodel, model in enumerate(Model_name): ds_model_list = {} mean_list = {} @@ -259,12 +252,12 @@ [ds_obs, ds_obs_sub], dim="time", data_vars="minimal" ) - ############## CMEMS ############## + # CMEMS ############## if obs in ["CMEMS"]: syear_obs = obs_year_range[nobs][0] fyear_obs = obs_year_range[nobs][1] fmon_obs = obs_year_range[nobs][2] - #### create time axis for overlapping period + # create time axis for overlapping period timeax = xr.cftime_range( start=cftime.datetime(syear_obs, 1, 1), end=cftime.datetime(fyear_obs, fmon_obs, 1), @@ -299,7 +292,7 @@ else: syear_obs = obs_year_range[nobs][0] fyear_obs = obs_year_range[nobs][1] - #### create time axis for overlapping period + # create time axis for overlapping period timeax = xr.cftime_range( start=cftime.datetime(syear_obs, 1, 1), end=cftime.datetime(fyear_obs, 12, 31), @@ -328,7 +321,7 @@ ds_obs_list[var].groupby("time.month").mean(dim="time").compute() ) ds_obs_list[var] = ( - ds_obs_list[var].groupby("time.month") - obs_season_list[var] + ds_obs_list[var].groupby("time.month") - obs_season_list[var] ) # remove linear trend @@ -345,7 +338,6 @@ obs_season_mlist[obs] = obs_season_list ds_obs_mlist[obs] = ds_obs_list - # # Derive wind stress curl ######### # Model @@ -359,7 +351,7 @@ ) linear_mlist[model]["curl_tau"] = ( - linear_mlist[model]["curl_tauuo"] + linear_mlist[model]["curl_tauvo"] + linear_mlist[model]["curl_tauuo"] + linear_mlist[model]["curl_tauvo"] ) mean_mlist[model]["curl_tauuo"], mean_mlist[model]["curl_tauvo"] = curl_tau( @@ -367,7 +359,7 @@ ) mean_mlist[model]["curl_tau"] = ( - mean_mlist[model]["curl_tauuo"] + mean_mlist[model]["curl_tauvo"] + mean_mlist[model]["curl_tauuo"] + mean_mlist[model]["curl_tauvo"] ) season_mlist[model]["curl_tauuo"], season_mlist[model]["curl_tauvo"] = curl_tau_3d( @@ -379,7 
+371,7 @@ ) season_mlist[model]["curl_tau"] = ( - season_mlist[model]["curl_tauuo"] + season_mlist[model]["curl_tauvo"] + season_mlist[model]["curl_tauuo"] + season_mlist[model]["curl_tauvo"] ) ######### @@ -395,7 +387,7 @@ ) obs_linear_mlist[obs]["curl_tau"] = ( - obs_linear_mlist[obs]["curl_tx"] + obs_linear_mlist[obs]["curl_ty"] + obs_linear_mlist[obs]["curl_tx"] + obs_linear_mlist[obs]["curl_ty"] ) obs_mean_mlist[obs]["curl_tx"], obs_mean_mlist[obs]["curl_ty"] = curl_var( @@ -403,7 +395,7 @@ ) obs_mean_mlist[obs]["curl_tau"] = ( - obs_mean_mlist[obs]["curl_tx"] + obs_mean_mlist[obs]["curl_ty"] + obs_mean_mlist[obs]["curl_tx"] + obs_mean_mlist[obs]["curl_ty"] ) obs_season_mlist[obs]["curl_tx"], obs_season_mlist[obs]["curl_ty"] = curl_var_3d( @@ -415,7 +407,7 @@ ) obs_season_mlist[obs]["curl_tau"] = ( - obs_season_mlist[obs]["curl_tx"] + obs_season_mlist[obs]["curl_ty"] + obs_season_mlist[obs]["curl_tx"] + obs_season_mlist[obs]["curl_ty"] ) else: @@ -457,9 +449,8 @@ os.path.join(PREFILE_DIR, "waswind_curltau_season.nc") ).curl_tau - # # Regional averaging -#### setting regional range +# setting regional range lon_range = lon_range_list lat_range = lat_range_list @@ -469,8 +460,6 @@ ind1 = np.where(lon_range_mod < float(0))[0] lon_range_mod[ind1] = lon_range_mod[ind1] + 360.0 # change Lon range to 0-360 - - ##################### # MODEL ##################### @@ -478,16 +467,15 @@ for nmodel, model in enumerate(Model_name): regionalavg_list = {} for nvar, var in enumerate(["curl_tau", "zos"]): - # read areacello da_area = ds_areacello[os.getenv("areacello_var")] da_area_temp = da_area.copy() - da_area_temp['lon'] = xr.where(da_area_temp.lon<0, - da_area_temp.lon+360., - da_area_temp.lon) + da_area_temp['lon'] = xr.where(da_area_temp.lon < 0, + da_area_temp.lon + 360., + da_area_temp.lon) # crop region - #ds_mask = ( + # ds_mask = ( # da_area # .where( # (da_area_temp[Model_coordname[1]] >= np.min(lon_range_mod)) @@ -497,7 +485,7 @@ # drop=True, # ) # .compute() - #) + # ) # NOTE: the code below is substituted to solve issue with indexing boolean dask arrays # The solution proposed is to call compute on each boolean index first @@ -508,11 +496,11 @@ lat_range_max = np.max(lat_range) da_area.where( (da_area_temp[Model_coordname[1]] >= lon_range_min - & da_area_temp[Model_coordname[1]] <= lon_range_max - & da_area_temp[Model_coordname[0]] >= lat_range_min - & da_area_temp[Model_coordname[0]] <= lat_range_max).compute(), + & da_area_temp[Model_coordname[1]] <= lon_range_max + & da_area_temp[Model_coordname[0]] >= lat_range_min + & da_area_temp[Model_coordname[0]] <= lat_range_max).compute(), drop=True - ) + ) ds_mask = ds_mask / ds_mask @@ -520,27 +508,26 @@ regionalavg_list[ "%s_%i_%i_%i_%i_season" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) - ] = ( - (season_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) - / (ds_mask * da_area).sum(dim=[xname, yname]) + ] = ( + (season_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) + / (ds_mask * da_area).sum(dim=[xname, yname]) ).compute() regionalavg_list[ "%s_%i_%i_%i_%i_mean" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) - ] = ( - (mean_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) - / (ds_mask * da_area).sum(dim=[xname, yname]) + ] = ( + (mean_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) + / (ds_mask * da_area).sum(dim=[xname, yname]) ).compute() regionalavg_list[ "%s_%i_%i_%i_%i_linear" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) 
- ] = ( - (linear_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) - / (ds_mask * da_area).sum(dim=[xname, yname]) + ] = ( + (linear_mlist[model][var] * ds_mask * da_area).sum(dim=[xname, yname]) + / (ds_mask * da_area).sum(dim=[xname, yname]) ).compute() - regionalavg_mlist[model] = regionalavg_list @@ -587,40 +574,38 @@ obs_regionalavg_list[ "%s_%i_%i_%i_%i_season" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) - ] = ( - (obs_season_mlist[obs][var] * ds_obs_mask * da_area).sum( - dim=[obs_xname, obs_yname] - ) - / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) + ] = ( + (obs_season_mlist[obs][var] * ds_obs_mask * da_area).sum( + dim=[obs_xname, obs_yname] + ) + / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) ).compute() obs_regionalavg_list[ "%s_%i_%i_%i_%i_mean" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) - ] = ( - (obs_mean_mlist[obs][var] * ds_obs_mask * da_area).sum( - dim=[obs_xname, obs_yname] - ) - / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) + ] = ( + (obs_mean_mlist[obs][var] * ds_obs_mask * da_area).sum( + dim=[obs_xname, obs_yname] + ) + / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) ).compute() obs_regionalavg_list[ "%s_%i_%i_%i_%i_linear" % (var, lon_range[0], lon_range[1], lat_range[0], lat_range[1]) - ] = ( - (obs_linear_mlist[obs][var] * ds_obs_mask * da_area).sum( - dim=[obs_xname, obs_yname] - ) - / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) + ] = ( + (obs_linear_mlist[obs][var] * ds_obs_mask * da_area).sum( + dim=[obs_xname, obs_yname] + ) + / (ds_obs_mask * da_area).sum(dim=[obs_xname, obs_yname]) ).compute() obs_regionalavg_mlist[obs] = obs_regionalavg_list - -#### plotting +#plotting fig = plt.figure(1) - ####### # mean ####### @@ -647,7 +632,7 @@ ssh = regionalavg_mlist[model][ f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_mean" ] - + ax1.scatter(wsc, ssh, label="%s" % (Model_legend_name[nmodel])) all_wsc.append(wsc) all_ssh.append(ssh) @@ -655,7 +640,7 @@ all_wsc = np.array(all_wsc) all_ssh = np.array(all_ssh) -#### setting the plotting format +# setting the plotting format ax1.set_ylabel("SSH (m)", {"size": "20"}, color="k") ax1.set_ylim([all_ssh.min() - all_ssh.min() / 5.0, all_ssh.max() + all_ssh.max() / 5.0]) ax1.set_xlabel("WSC (N/m$^3$)", {"size": "20"}, color="k") @@ -674,10 +659,10 @@ all_ssh = [] wsc = obs_regionalavg_mlist["WASwind"][ -f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" ] ssh = obs_regionalavg_mlist["CMEMS"][ -f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" + f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" ].slope ax1.scatter(wsc, ssh, c="k", label="Observation") all_wsc.append(wsc) @@ -685,10 +670,10 @@ for nmodel, model in enumerate(Model_name): wsc = regionalavg_mlist[model][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" ] ssh = regionalavg_mlist[model][ - f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" + f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_linear" ].slope ax1.scatter(wsc, ssh, label="%s" % (Model_legend_name[nmodel])) all_wsc.append(wsc) @@ 
-697,8 +682,7 @@ all_wsc = np.array(all_wsc) all_ssh = np.array(all_ssh) - -#### setting the plotting format +# setting the plotting format ax1.set_ylabel("SSH (m)", {"size": "20"}, color="k") ax1.set_ylim( [ @@ -719,7 +703,6 @@ ax1.legend(loc="upper left", bbox_to_anchor=(1.05, 1), fontsize=14, facecolor=None) ax1.grid(linestyle="dashed", alpha=0.5, color="grey") - ######### # Annual amp ######### @@ -729,20 +712,20 @@ all_ssh = [] wsc = ( - obs_regionalavg_mlist["WASwind"][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].max() - - obs_regionalavg_mlist["WASwind"][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].min() + obs_regionalavg_mlist["WASwind"][ + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].max() + - obs_regionalavg_mlist["WASwind"][ + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].min() ) ssh = ( - obs_regionalavg_mlist["CMEMS"][ - f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].max() - - obs_regionalavg_mlist["CMEMS"][ - f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].min() + obs_regionalavg_mlist["CMEMS"][ + f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].max() + - obs_regionalavg_mlist["CMEMS"][ + f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].min() ) wsc = np.abs(wsc) ssh = np.abs(ssh) @@ -752,20 +735,20 @@ for nmodel, model in enumerate(Model_name): wsc = ( - regionalavg_mlist[model][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].max() - - regionalavg_mlist[model][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].min() + regionalavg_mlist[model][ + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].max() + - regionalavg_mlist[model][ + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].min() ) ssh = ( - regionalavg_mlist[model][ - f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].max() - - regionalavg_mlist[model][ - f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" - ].min() + regionalavg_mlist[model][ + f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].max() + - regionalavg_mlist[model][ + f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + ].min() ) ax1.scatter(wsc, ssh, label="%s" % (Model_legend_name[nmodel])) wsc = np.abs(wsc) @@ -773,12 +756,10 @@ all_wsc.append(wsc) all_ssh.append(ssh) - all_wsc = np.array(all_wsc) all_ssh = np.array(all_ssh) - -#### setting the plotting format +# setting the plotting format ax1.set_ylabel("SSH (m)", {"size": "20"}, color="k") ax1.set_ylim([all_ssh.min() - all_ssh.min() / 5.0, all_ssh.max() + all_ssh.max() / 5.0]) ax1.set_xlabel("WSC (N/m$^3$)", {"size": "20"}, color="k") @@ -788,22 +769,20 @@ ax1.set_title("Annual amplitude", {"size": "24"}, pad=24) ax1.grid(linestyle="dashed", alpha=0.5, color="grey") - ######### # Annual phase ######### ax1 = fig.add_axes([1.3, -1.5, 1, 1]) - all_wsc = [] all_ssh = [] ind = obs_regionalavg_mlist["WASwind"][ 
-f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ].argmax() wsc = ( obs_regionalavg_mlist["WASwind"][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ] .isel(month=ind) .month.values @@ -813,7 +792,7 @@ ].argmax() ssh = ( obs_regionalavg_mlist["CMEMS"][ - f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"adt_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ] .isel(month=ind) .month.values @@ -825,21 +804,21 @@ for nmodel, model in enumerate(Model_name): ind = regionalavg_mlist[model][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ].argmax() wsc = ( regionalavg_mlist[model][ - f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"curl_tau_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ] .isel(month=ind) .month.values ) ind = regionalavg_mlist[model][ - f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ].argmax() ssh = ( regionalavg_mlist[model][ - f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" + f"zos_{lon_range[0]:.0f}_{lon_range[1]:.0f}_{lat_range[0]:.0f}_{lat_range[1]:.0f}_season" ] .isel(month=ind) .month.values @@ -849,12 +828,10 @@ all_wsc.append(wsc) all_ssh.append(ssh) - all_wsc = np.array(all_wsc) all_ssh = np.array(all_ssh) - -#### setting the plotting format +# setting the plotting format ax1.set_ylabel("SSH (month)", {"size": "20"}, color="k") ax1.set_ylim([0.5, 12.5]) ax1.set_xlabel("WSC (month)", {"size": "20"}, color="k") @@ -864,9 +841,8 @@ ax1.set_title("Annual phase", {"size": "24"}, pad=24) ax1.grid(linestyle="dashed", alpha=0.5, color="grey") - fig.savefig( - os.getenv("WK_DIR") + "/model/PS/example_model_plot.eps", + os.getenv("WORK_DIR") + "/model/PS/example_model_plot.eps", facecolor="w", edgecolor="w", orientation="portrait", @@ -876,7 +852,7 @@ pad_inches=None, ) fig.savefig( - os.getenv("WK_DIR") + "/obs/PS/example_obs_plot.eps", + os.getenv("WORK_DIR") + "/obs/PS/example_obs_plot.eps", facecolor="w", edgecolor="w", orientation="portrait", diff --git a/diagnostics/tropical_pacific_sea_level/xr_ufunc.py b/diagnostics/tropical_pacific_sea_level/xr_ufunc.py index bc2f5e6f9..11a040b0f 100644 --- a/diagnostics/tropical_pacific_sea_level/xr_ufunc.py +++ b/diagnostics/tropical_pacific_sea_level/xr_ufunc.py @@ -1,10 +1,10 @@ -#### Function for calculating trend and mean based from linear regression +# Function for calculating trend and mean based from linear regression from scipy import stats import xarray as xr import numpy as np -def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipna=False): +def da_linregress(da_data, xname="x", yname="y", tname="time", stTconfint=0.99, skipna=False): """ The function calculate the trend of each trend in the gridded data. 
Trend is calculated based on stats.linregress @@ -21,12 +21,12 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_conf (xr.DataArray) - a 2 dimension gridded data representing the confidence interval of the linear trend """ - - if skipna == True: + + if skipna: # make sure the order of the dataarray is correct - da_data = da_data.transpose(tname,yname,xname) - + da_data = da_data.transpose(tname, yname, xname) + nx = len(da_data[xname]) ny = len(da_data[yname]) @@ -35,7 +35,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_r_value = da_slope.copy() * np.nan da_p_value = da_slope.copy() * np.nan da_std_err = da_slope.copy() * np.nan - da_conf = da_slope.copy() * np.nan for xx in range(nx): for yy in range(ny): @@ -43,7 +42,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_data[:, yy, xx].notnull(), drop=True ) if len(da_ts) > 0: - da_time = da_ts.time.copy() year = da_ts["time.year"].values month = da_ts["time.month"].values @@ -60,7 +58,6 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn da_std_err[yy, xx] = std_err else: - da_time = da_data.time.copy() year = da_data["time.year"].values month = da_data["time.month"].values @@ -83,7 +80,7 @@ def da_linregress(da_data,xname="x",yname="y",tname="time",stTconfint=0.99,skipn # vectorize=True,dask='parallelized', # dask_gufunc_kwargs={"allow_rechunk":True}) - ### calculate confidence interval + # calculate confidence interval # calculate the error bar base on the number of standard error # the number related to dist. percentage is derived base on Students's T # distribution diff --git a/doc/pp_flow_diagram.md b/doc/pp_flow_diagram.md new file mode 100644 index 000000000..8e9b65bad --- /dev/null +++ b/doc/pp_flow_diagram.md @@ -0,0 +1,29 @@ +```mermaid + flowchart TD + rtConfig[/runtime config file/]-->fw[framework] + fw[Framework]-->modMetadataYN{Modify\n metadata?} + modMetadataYN-- Yes -->queryFieldlist[Query fieldlist for POD vars] + fieldlist[/Variable\n Fieldlist/]-->queryFieldlist + queryFieldlist-->queryCatVar[Query ESM Intake catalog\n for POD vars] + cat1[/ESM intake catalog/]-->queryCatVar + podSet[/POD\n settings file/]-->queryCatVar + queryCatVar-->doMetadataMod[Translate data\n convert units\n ...] + doMetadataMod-->addDF[Save modified metadata\n to dataframe] + addDF-->moveFilesYN{Move input data?} + modMetadataYN-- No-->queryCatVarOnly[Query catalog for POD vars] + podSet-->queryCatVarOnly + cat1-->queryCatVarOnly + queryCatVarOnly-->addDF2[Save POD data subset\n to dataframe] + addDF2-->moveFilesYN + moveFilesYN-- Yes-->queryCatFiles[Query catalog file paths] + cat1-->queryCatFiles + queryCatFiles-->moveFiles[Move input data\n to workdir] + moveFiles-->modDF[Create/update data frame\n with workdir file paths] + moveFilesYN-- No -->ppYN{Process Data?} + modDF-->ppYN + ppYN-- No-->makeNewCat[Write new catalog\n with updated POD data info] + ppYN-- Yes-->doPP[Level extraction\n apply scale+offset\n ... ] + doPP-->writePPFiles[Write processed data files\n to workdir] + writePPFiles-->makeNewCat + makeNewCat-->cat2[/New ESM Intake catalog\n with POD-specific data/] +``` diff --git a/doc/sphinx/dev_migration.rst b/doc/sphinx/dev_migration.rst deleted file mode 100644 index c3ba7e42d..000000000 --- a/doc/sphinx/dev_migration.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. 
_ref-dev-migration: - -Migration from framework v2.0 -============================= - -In this section we describe the major changes made from v2.0 to v3.0 of the framework that are relevant for POD developers. The scope of the framework has expanded in version 3.0, which required changes in the way the PODs and framework interact. New developers can skip this section, as the rest of this documentation is self-contained. - -Getting Started and Developer's Walkthrough -------------------------------------------- - -A main source of documentation for v2.0 of the framework were the "Getting Started" and "Developer's Walkthrough" documents. Updated versions of these documents are: - -- `Getting Started v3.0 (PDF) `__ -- `Developer's Walkthrough v3.0 (PDF) `__ - -.. note:: - These documents contain a subset of information available on this website, rather than new material: the text is reorganized to be placed in the same order as the v2.0 documents, for ease of comparison. - -Checklist for migrating a POD from v2.0 ---------------------------------------- - -Here we list the broad set of tasks needed to update a POD written for v2.0 of the framework to v3.0. - -- **Update settings and varlist files**: In v3.0 these have been combined into a single ``settings.jsonc`` file. See the settings file :doc:`guide <./dev_settings_quick>`, :doc:`reference <./ref_settings>`, and `example `__ for descriptions of the new format. -- **Update references to framework environment variables**: See the table below for an overview, and the :doc:`reference <./ref_envvars>` for complete information on what environment variables the framework sets. *Note* that your POD should not use any hard-coded paths or variable names, but should read this information in from the framework's environment variables. -- **Resubmit digested observational data**: To minimize the size of supporting data users need to download, we ask that you only supply observational data specifically needed for plotting (preferably size within MB range), as well as any code used to perform that data reduction from raw sources. -- **Remove HTML templating code**: Version 2.0 of the framework required that your POD's top-level driver script take particular steps to assemble its HTML file. In v3.0 these tasks are done by the framework: all that your POD needs to do is generate figures of the appropriate formats and names in the specified folders, and the framework will convert and link them appropriately. - -Conversion from v2.0 environment variables ------------------------------------------- - -In v3.0, the paths referred to by the framework's environment variables have been changed to be specific to your POD. The variables themselves have been renamed to avoid possible confusion. Here's a table of the appropriate substitutions to make: - -.. 
list-table:: Environment variable name conversion - :header-rows: 1 - - * - Path Description - - v2.0 environment variable expression - - Equivalent v3.0 variable - * - Top-level code repository - - ``$DIAG_HOME`` - - No variable set: PODs should not access files outside of their own source code directory within ``$POD_HOME`` - * - POD's source code - - ``$VARCODE``/ - - ``$POD_HOME`` - * - POD's observational/supporting data - - ``$VARDATA``/ - - ``$OBS_DATA`` - * - POD's working directory - - ``$variab_dir``/ - - ``$WK_DIR`` - * - Path to requested NetCDF data file for at date frequency - - Currently unchanged: ``$DATADIR``//``$CASENAME``...nc - - - * - Other v2.0 paths - - ``$DATA_IN``, ``$DIAG_ROOT``, ``$WKDIR`` - - No equivalent variable set. PODs shouldn’t access files outside of their own directories; instead use one of the quantities above. diff --git a/doc/sphinx/dev_start.rst b/doc/sphinx/dev_start.rst index bda722b37..c1d18b2ee 100644 --- a/doc/sphinx/dev_start.rst +++ b/doc/sphinx/dev_start.rst @@ -26,8 +26,8 @@ This is the "beta test" version, used for testing changes before releasing them Developers may download the code from GitHub as described in :ref:`ref-download`, but we strongly recommend that you clone the repo in order to keep up with changes in the main branch, and to simplify submitting pull requests with your POD's code. Instructions for how to do this are given in :doc:`dev_git_intro`. -Installing dependencies via conda -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Installing dependencies with Conda +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Regardless of development language, we strongly recommend that developers use conda to manage their language and library versions. Note that Conda is not Python-specific, but allows coexisting versioned environments of most scripting languages, including, `R `__, `NCL `__, `Ruby `__, `PyFerret `__, and more. @@ -41,8 +41,8 @@ If you are using Anaconda or miniconda to manage the conda environments, run: % ./src/conda/conda_env_setup.sh --all --conda_root $CONDA_ROOT --env_dir $CONDA_ENV_DIR -Installing dependencies via Micromamba -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Installing dependencies with Micromamba +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Micromamba is a lightweight version of Anaconda. It is required to install the base and python3_base conda enviroments on macOS machines with Apple M-series chips. Installation instructions are available in the `Micromamba Documentation `__, Once Micromamba is installed on your system, run the following to install all conda environments if you are NOT using an @@ -102,7 +102,7 @@ If your POD requires languages that aren't available in an existing environment - We recommend specifying versions as little as possible, out of consideration for end-users: if each POD specifies exact versions of all its dependencies, conda will need to install multiple versions of the same libraries. In general, specifying a version should only be needed in cases where backward compatibility was broken (e.g., Python 2 vs. 3) or a bug affecting your POD was fixed (e.g., postscript font rendering on Mac OS with older NCL). Conda installs the latest version of each package that's consistent with all other dependencies. 
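Where a minimum language version genuinely is required, a runtime guard in the POD's driver script is a cheap complement to a conda version pin. A minimal sketch, mirroring the interpreter check that mdtf_framework.py itself performs (visible at the end of this diff):

```python
# Minimal sketch: fail fast with a clear message when the interpreter
# is too old, rather than crashing later on unsupported syntax.
import sys

if sys.version_info < (3, 10):
    sys.exit("ERROR: this POD requires python >= 3.10.")
```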
-Framework interaction with conda environments +Framework interaction with Conda environments ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As described in :ref:`ref-execute`, when you run the ``mdtf`` executable, among other things, it reads ``pod_list`` in ``default_tests.jsonc`` and executes POD codes accordingly. For a POD included in the list (referred to as $POD_NAME): diff --git a/doc/sphinx/dev_toc.rst b/doc/sphinx/dev_toc.rst index 2afe201bd..052eb1b28 100644 --- a/doc/sphinx/dev_toc.rst +++ b/doc/sphinx/dev_toc.rst @@ -6,7 +6,6 @@ Developer information :numbered: 2 dev_overview - dev_migration dev_checklist dev_start dev_guidelines diff --git a/doc/sphinx/fmwk_cli.rst b/doc/sphinx/fmwk_cli.rst index a15d7bf2e..e8284ede2 100644 --- a/doc/sphinx/fmwk_cli.rst +++ b/doc/sphinx/fmwk_cli.rst @@ -1,7 +1,7 @@ Framework configuration and parsing =================================== -This section describes the :doc:`src.cli`, responsible for parsing input configuration. Familiarity with the python :py:mod:`argparse` module is recommended. +This section describes the :doc:`src.cli`, responsible for parsing input configuration. CLI functionality ----------------- @@ -13,11 +13,12 @@ Flexibility and extensibility are among the MDTF project's design goals, which m - Allow for specifying and recording user input in a file, to allow provenance of package runs and to eliminate the need for long strings of CLI flags. - Record whether the user has explicitly set an option (to a value which may or may not be the default), or whether the option is unset and its default value is being used. -- Define "plug-ins" for specific tasks (such as model data retrieval) which can define their own CLI settings. This is necessary to avoid confusing the user with settings that are irrelevant for their specified analysis; e.g. the ``--version-date`` flag used by the :ref:`ref-data-source-cmip6` data source would be meaningless for a source of data that didn't have a revision history. - Enable site-specific customizations, which can add to or modify any of the above properties. - Define CLIs through configuration files instead of code to streamline the process of defining all of the above. -No third-party CLI package implements all of the above features, so the MDTF package provides its own solution, described here. +The MDTF framework uses the `Python Click package `__ +to create the CLI from the runtime configuration file options, +eliminating the need for the custom CLI modules and plugins used in prior versions of the code. .. _ref-cli-subcommands: @@ -42,35 +43,19 @@ Additional package manager-like commands could be added to allow users to select .. _ref-cli-plugins: -CLI Plugins -+++++++++++ - -"Plug-ins" provide different ways to implement the same type of task, following a common API. One example is obtaining model data from different sources: different code is needed for reading the sample model data from a local directory vs. accessing remote data via a catalog interface. In the plug-in system, the code for these two cases would be written as distinct data source plug-ins, and the data retrieval method to use would be selected at runtime by the user via the ``--data-manager`` CLI flag. This allows new functionalities to be developed and tested independently of each other, and without requiring changes to the common logic of the framework. - -The categories of plug-ins are fixed by the framework.
Currently these are ``data_manager``, which retrieves model data, and ``environment_manager``, which sets up each POD's third-party code dependencies. Two other plug-ins are defined but not exposed to the user through the UI, because only one option is currently implemented for them: ``runtime_manager``, which controls how PODs are executed, and ``output_manager``, which controls how the PODs' output files are collected and processed. - -Allowed values for each of these plug-in categories are defined in the ``cli_plugins.jsonc`` files: the "base" one in ``/src``, and optionally one in the site-specific directory selected by the user. - -As noted in the overview above, for a manageable interface we need to allow each plug-in to define its own CLI options. These are defined in the ``cli`` attribute for each plug-in definition in the ``cli_plugins.jsonc`` file, following the syntax described below. When the CLI parser is being configured, the user input is first partially parsed to determine what plug-ins the user has selected, and then their specific CLI options are added to the "full" CLI parser. File-based CLI definition ------------------------- -The CLI for the package is constructed from a set of JSONC configuration files. The syntax for these files is essentially a direct JSON serialization of the arguments given to :py:class:`~argparse.ArgumentParser`, with a few extensions described below. +The CLI for the package is constructed from a runtime configuration file. +The syntax for these files is essentially a direct JSON serialization of the arguments given to :py:class:`~argparse.ArgumentParser`, +with a few extensions described below. Location of configuration files +++++++++++++++++++++++++++++++ The top-level configuration files have hard-coded names: -- `src/cli_subcommands.jsonc `__ to define the :ref:`subcommands `, and -- `src/cli_plugins.jsonc `__ to define the :ref:`plug-ins `. -- Files with these names in a site directory will override the contents of the above files in ``/src`` if that site is selected, e.g. `sites/NOAA_GFDL/cli_subcommands.jsonc `__. - -Plugins define their own CLI options in the ``cli`` attribute in their entry in the plugins file, using the syntax described below. On the other hand, each subcommand defines its CLI through a separate file, given in the ``cli_file`` attribute. Chief among these is - -- `src/cli_template.jsonc `__, which defines the CLI for running the package in the absence of site-specific modifications. - CLI configuration file syntax +++++++++++++++++++++++++++++ @@ -100,10 +85,6 @@ Use in the code :doc:`src.cli` defines a hierarchy of classes representing objects in a CLI parser specification, which are instantiated by values from the configuration files. At the root of the hierarchy is :class:`~src.cli.CLIConfigManager`, a Singleton which reads all the files, begins the object creation process, and stores the results. The other classes in the hierarchy are, in descending order: -- :class:`~src.cli.CLICommand`\: Dataclass representing a :ref:`subcommand ` or a :ref:`plug-in `. This wraps a parser (``parser`` attribute) and objects in the classes below, corresponding to configuration for that parser, which are initialized from the configuration files (``cli`` attribute.) It also implements a :meth:`~src.cli.CLICommand.call` method for dispatching parsed values to the initialization method of the class implementing the subcommand or plug-in. 
-- :class:`~src.cli.CLIParser`\: Dataclass representing arguments passed to the constructor for :py:class:`~argparse.ArgumentParser`. A parser object (next section) is configured with information in objects in the classes below via this class's :class:`~src.cli.CLIParser.configure` method. -- :class:`~src.cli.CLIArgumentGroup`\: Dataclass representing arguments passed to :py:meth:`~argparse.ArgumentParser.add_argument_group`. This only affects the formatting in the online help. -- :class:`~src.cli.CLIArgument`\: Dataclass representing arguments passed to :py:meth:`~argparse.ArgumentParser.add_argument`, as described above. CLI parsers @@ -112,12 +93,6 @@ CLI parsers Parser classes ++++++++++++++ -As described above, the CLI used on a specific run of the package depends on the values of some of the CLI arguments: the ``--site``, and the values chosen for recognized plug-ins. This introduces a chicken-and-egg level of complexity, in which we need to parse some arguments in order to determine how to proceed with the rest of the parsing. The :doc:`src.cli` does this by defining several parser classes, all of which inherit from :py:class:`~argparse.ArgumentParser`. - -- :class:`~src.cli.MDTFArgParser`: The base class for all parsers, which implements custom help formatting (:class:`~src.cli.CustomHelpFormatter`) and recording of user-provided vs. default values for options (via :class:`~src.cli.RecordDefaultsAction`) -- :class:`~src.cli.MDTFArgPreparser`: Child class used for partial parsing ("preparsing"). This is used in :meth:`~src.cli.MDTFTopLevelArgParser.init_user_defaults` to extract paths to file-based user input, in :meth:`~src.cli.MDTFTopLevelArgParser.init_site` to extract the site, and in :meth:`~src.cli.MDTFTopLevelArgParser.setup` to extract values for the subcommand and plug-in options before the full CLI is parsed. -- :class:`~src.cli.MDTFTopLevelArgParser`: Child class for the top-level CLI interface to the package. Has additional methods for formatting help text, and initiating the CLI configuration and parsing process described in detail below. -- :class:`~src.cli.MDTFTopLevelSubcommandArgParser`: Currently unused. Child class which would take care of parsing and dispatch to MDTF package :ref:`subcommands `. This is currently done by manual inspection of ``sys.argv`` in `mdtf_framework.py `__. .. _ref-cli-precedence: @@ -146,7 +121,7 @@ Building the CLI ++++++++++++++++ - The mdtf wrapper script activates the ``_MDTF_base`` conda environment and calls `mdtf_framework.py `__. -- mdtf_framework.py manually determines the subcommand from the currently recognized values, and constructs the CLI appropriate to it. In this example, we're running the package, so the :class:`~src.cli.MDTFTopLevelArgParser` is initialized and its :meth:`~src.cli.MDTFTopLevelArgParser.setup` method is called. +- mdtf_framework.py - This calls :meth:`~src.cli.MDTFTopLevelArgParser.init_user_defaults`, which parses the value of ``--input-file`` and, if set, reads the file and stores its contents in the ``user_defaults`` attribute of :class:`~src.cli.CLIConfigManager`. - It then calls :meth:`~src.cli.MDTFTopLevelArgParser.init_site`, which parses the value of the selected site and reads the site-specific defaults files (if any). @@ -173,12 +148,3 @@ Parsing CLI arguments - The :meth:`~src.cli.MDTFTopLevelArgParser.dispatch` then imports the modules for all selected plug-in objects. 
We do this import "on demand," rather than simply always importing everything, because a plug-in may make use of third-party modules that the user hasn't installed (e.g. if the plug-in is site-specific and the user is at a different site.) - Finally, :meth:`~src.cli.MDTFTopLevelArgParser.dispatch` calls the :meth:`~src.cli.CLICommand.call` method on the selected subcommand to hand off execution. As noted above, subcommand functionality is implemented but unused, so currently we always hand off the the first (only) subcommand, **mdtf run**, regardless of input. The corresponding entry point, as specified in `src/cli_plugins.jsonc `__, is the ``__init__`` method of :class:`~src.core.MDTFFramework`. -Extending the user interface ----------------------------- - -Currently, the only method for the user to configure a run of the package is the CLI described above, which parses command-line options and :ref:`configuration files `. - -In the future it may be desirable to provide additional invocation mechanisms, e.g. from a larger workflow engine or a web-based front end. - -Parsing and validation logic is split between the :doc:`src.cli` and the :class:`~src.core.MDTFFramework` class. In order to avoid duplicating logic and ensure that configuration gets parsed consistently across the different methods, the raw user input should be introduced into the chain of methods in the parsing logic (described above) as early as possible. - diff --git a/doc/sphinx/fmwk_toc.rst b/doc/sphinx/fmwk_toc.rst index 2453f10f9..b093650c9 100644 --- a/doc/sphinx/fmwk_toc.rst +++ b/doc/sphinx/fmwk_toc.rst @@ -43,14 +43,12 @@ Main framework modules ^^^^^^^^^^^^^^^^^^^^^^ .. autosummary:: - - src.core - src.data_manager + src.pod_setup src.data_sources - src.diagnostic src.environment_manager - src.preprocessor + src.translation src.output_manager + src.preprocessor Supporting framework modules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -62,6 +60,7 @@ Supporting framework modules src.data_model src.mdtf_info src.units + src.varlist_util src.verify_links src.xr_parser @@ -78,14 +77,6 @@ The ``src.util`` subpackage provides non-MDTF-specific utility functionality use src.util.exceptions src.util.filesystem src.util.logs + src.util.path_utils src.util.processes -Currently unused modules -^^^^^^^^^^^^^^^^^^^^^^^^ - -The following modules implement features moved to, or intended for, future releases. They can be removed from the current production branch with no effect. - -.. autosummary:: - - src.conflict_resolution - src.install diff --git a/mdtf_framework.py b/mdtf_framework.py index c6411b599..08922c88c 100755 --- a/mdtf_framework.py +++ b/mdtf_framework.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# This is the top-level python script for the MDTF diagnostics package. +# This is the top-level python script for the MDTF-diagnostics package. # See http://gfdl.noaa.gov/mdtf-diagnostics. # NOTE: Under the standard installation procedure, users should never call this @@ -8,6 +8,7 @@ # created during installation. import sys + # do version check before anything else if sys.version_info.major != 3 or sys.version_info.minor < 10: sys.exit("ERROR: The MDTF package requires python >= 3.10. 
Please check "
@@ -15,56 +16,214 @@
              f"Attempted to run with following python version:\n{sys.version}")
 # passed; continue with imports
 import os
-from src import cli
-from src.util import logs
+import copy
+import click
+from src import util, cli, data_sources, pod_setup, preprocessor, translation, environment_manager, output_manager
+import dataclasses
+import logging
+import datetime
+import collections
+
+
+_log = logging.getLogger(__name__)
+
+ConfigTuple = collections.namedtuple(
+    'ConfigTuple', 'name backup_filename contents'
+)
+ConfigTuple.__doc__ = """
+    Class wrapping general structs used for configuration.
+"""
+
+
+class MainLogger(util.MDTFObjectLoggerMixin, util.MDTFObjectLogger):
+    """Class to hold logging information for main driver script"""
+    log: dataclasses.InitVar = _log
+    name: str
+
+    def __init__(self, log_dir: str):
+        if not os.path.exists(log_dir):
+            os.mkdir(log_dir)
+        self.name = "MDTF_main.{:%Y-%m-%d:%H.%M.%S}".format(datetime.datetime.now())
+        # Access MDTFObjectLogger attributes
+        super().__init__(name=self.name)
+        self.init_log(log_dir=log_dir)
+
+
+def print_summary(pods, _log: logging.Logger):
+    def summary_info_tuple(pod):
+        """create tuple of ([failed cases], [not failed cases], POD_OUTPUT_DIR) for input pod
+        """
+        return (
+            [p_name for p_name, p in pod.multi_case_dict['CASE_LIST'].items() if pod.failed],
+            [p_name for p_name, p in pod.multi_case_dict['CASE_LIST'].items() if not pod.failed],
+            getattr(pod.paths, 'POD_OUTPUT_DIR', '')
+        )
+
+    d = {p_name: summary_info_tuple(p) for p_name, p in pods.items()}
+    failed = any(len(tup[0]) > 0 for tup in d.values())
+    _log.info('\n' + (80 * '-'))
+    if failed:
+        _log.info("Exiting with errors.")
+        for case_name, tup in d.items():
+            _log.info(f"Summary for {case_name}:")
+            if tup[0][0] == 'dummy sentinel string':
+                _log.info('\tAn error occurred in setup. No PODs were run.')
+            else:
+                if tup[1]:
+                    _log.info((f"\tThe following PODs exited normally: "
+                               f"{', '.join(tup[1])}"))
+                if tup[0]:
+                    _log.info((f"\tThe following PODs raised errors: "
+                               f"{', '.join(tup[0])}"))
+                _log.info(f"\tOutput written to {tup[2]}")
+    else:
+        _log.info("Exiting normally.")
+        for pod_name, tup in d.items():
+            _log.info(f"Summary for {pod_name}:")
+            _log.info("\tAll PODs exited normally.")
+            _log.info(f"\tOutput written to {tup[2]}")
+        for pod_name, pod_atts in pods.items():
+            pod_atts.status = util.ObjectStatus.SUCCEEDED
 
-def validate_base_environment():
-    """Check that the package's required third-party dependencies (listed in
-    src/conda/env_base.yml) are accessible.
+@click.option('-f',
+              '--configfile',
+              required=True,
+              type=click.Path(),
+              help='Path to the runtime configuration file'
+              )
+@click.option("-v",
+              "--verbose",
+              is_flag=True,
+              default=False,
+              help="Enables verbose mode.")
+@click.command()
+@click.pass_context
+def main(ctx, configfile: str, verbose: bool = False) -> int:
+    """A community-developed package to run Process Oriented Diagnostics on weather and climate data
     """
-    # checking existence of one third-party module is imperfect, but will
-    # catch the most common case where user hasn't installed environments
-    try:
-        import cfunits
-    except ModuleNotFoundError:
-        sys.exit("ERROR: MDTF dependency check failed. Please make sure the "
-            "package's base environment has been activated prior to execution, e.g. 
" - "by calling the 'mdtf' wrapper script.\nSee installation instructions " - "at mdtf-diagnostics.rtfd.io/en/latest/sphinx/start_install.html.") - -def main(argv): - # get dir of currently executing script: - code_root = os.path.dirname(os.path.realpath(__file__)) + + def backup_config(config): + """Copy serializable version of parsed settings, in order to write + backup config file. + """ + d = copy.deepcopy(config) + d = {k: v for k, v in d.items() if not k.endswith('_is_default_')} + d['case_list'] = copy.deepcopy(config.case_list) + return ConfigTuple( + name='backup_config', + backup_filename='config_save.json', + contents=d + ) + # Cache log info in memory until log file is set up - logs.initial_log_config() - - # poor man's subparser: argparse's subparser doesn't handle this - # use case easily, so just dispatch on first argument - if len(argv) == 1 or \ - len(argv) == 2 and argv[1].lower() in ('-h', '--help'): - # case where we print CLI help - cli_obj = cli.MDTFTopLevelArgParser(code_root) - cli_obj.print_help() - return 0 # will actually exit from print_help - elif argv[1].lower() == 'info': - # case where we print command-line info on PODs - from src import mdtf_info - mdtf_info.InfoCLIHandler(code_root, argv[2:]) - return 0 # will actually exit from print_help + util.logs.initial_log_config() + + # print(f"=== Starting {os.path.realpath(__file__)}\n") + # NameSpace allows dictionary keys to be referenced with dot notation + ctx.config = util.NameSpace() + # parse the runtime config file + ctx.config = cli.parse_config_file(configfile) + # Test ctx.config + # print(ctx.config.WORK_DIR) + ctx.config.CODE_ROOT = os.path.dirname(os.path.realpath(__file__)) + ctx.config.TEMP_DIR_ROOT = ctx.config.WORK_DIR + log_config = cli.read_config_file( + ctx.config.CODE_ROOT, "src", "logging.jsonc" + ) + cli.verify_runtime_config_options(ctx.config) + # Initialize the model path object and define the model data output paths + make_new_work_dir = not ctx.config.overwrite + model_paths = util.ModelDataPathManager(ctx.config, + new_work_dir=make_new_work_dir) + model_paths.setup_data_paths(ctx.config.case_list) + ctx.config.update({'WORK_DIR': model_paths.WORK_DIR}) + ctx.config.update({'OUTPUT_DIR': model_paths.OUTPUT_DIR}) + # TODO: update paths in ctx.config so that POD paths are placed in the correct sub-directories + backup_config = backup_config(ctx.config) + ctx.config._configs = dict() + ctx.config._configs[backup_config.name] = backup_config + ctx.config._configs['log_config'] = ConfigTuple( + name='log_config', + backup_filename=None, + contents=log_config + ) + + # Set up main logger + log = MainLogger(log_dir=model_paths.WORK_DIR) + if verbose: + log.log.debug("Initialized cli context") + # configure a variable translator object with information from Fieldlist tables + var_translator = translation.VariableTranslator(ctx.config.CODE_ROOT) + var_translator.read_conventions(ctx.config.CODE_ROOT) + + # initialize the preprocessor (dummy pp object if run_pp=False) + data_pp = preprocessor.init_preprocessor(model_paths, + ctx.config, + ctx.config.run_pp + ) + # set up the case data source dictionary + cases = dict() + for case_name, case_dict in ctx.config.case_list.items(): + # instantiate the data_source class instance for the specified convention + cases[case_name] = data_sources.data_source[case_dict.convention.upper() + "DataSource"](case_name, + case_dict, + model_paths, + parent=None) + cases[case_name].set_date_range(case_dict.startdate, case_dict.enddate) + + pods = 
dict.fromkeys(ctx.config.pod_list, [])
+    pod_runtime_reqs = dict()
+    # configure pod object(s)
+    for pod_name in ctx.config.pod_list:
+        pods[pod_name] = pod_setup.PodObject(pod_name, ctx.config)
+        pods[pod_name].setup_pod(ctx.config, model_paths, cases)
+        pods[pod_name].log.info(f"Preprocessing data for {pod_name}")
+        for k, v in pods[pod_name].runtime_requirements.items():
+            if k not in pod_runtime_reqs:
+                pod_runtime_reqs[k] = v
+    # read the subset of data for the cases and date range(s) and preprocess the data
+    cat_subset = data_pp.process(cases, ctx.config, model_paths.MODEL_WORK_DIR)
+    # write the preprocessed files
+    data_pp.write_ds(cases, cat_subset, pod_runtime_reqs)
+    # write the ESM intake catalog for the preprocessed files
+    data_pp.write_pp_catalog(cat_subset, model_paths, log.log)
+    # configure the runtime environments and run the POD(s)
+    if not any(p.failed for p in pods.values()):
+        log.log.info("### Running PODs: %s", ', '.join(pods.keys()))
+        run_mgr = environment_manager.SubprocessRuntimeManager(pods, ctx.config, log)
+        run_mgr.setup()
+        run_mgr.run(cases, log)
     else:
-        # case where we run the actual framework
-        print(f"=== Starting {os.path.realpath(__file__)}\n")
-        validate_base_environment()
+        for p in pods.values():
+            if p.failed:
+                log.log.info("Data request for pod '%s' failed; skipping execution.", p.name)
 
-        # not printing help or info, setup CLI normally
-        cli_obj = cli.MDTFTopLevelArgParser(code_root,argv=argv)
-        framework = cli_obj.dispatch()
-        exit_code = framework.main()
-        return exit_code
+    # convert POD figure files if necessary
+    # generate html output
+    for p in pods.values():
+        out_mgr = output_manager.HTMLOutputManager(p, ctx.config)
+        out_mgr.make_output(p, ctx.config)
+
+    # clean up temporary directories
+    tempdirs = util.TempDirManager(ctx.config)
+    tempdirs.cleanup()
+
+    print_summary(pods, log.log)
+    # close the varlistEntry log handlers
+    for case_name, case_dict in cases.items():
+        for var in case_dict.iter_children():
+            var._log_handler.close()
+
+    # close the main log file
+    log._log_handler.close()
+
+    if not any(v.failed for v in pods.values()):
+        return util.exit_handler(code=0)
+    else:
+        return util.exit_handler(code=1)
 
 
 if __name__ == '__main__':
-    argv = sys.argv[1::] if len(sys.argv[1::]) >= 2 else sys.argv
-    exit_code = main(argv)
+    exit_code = main(prog_name='MDTF-diagnostics')
     sys.exit(exit_code)
diff --git a/setup.py b/setup.py
index 2c8ce5c00..8b1110573 100644
--- a/setup.py
+++ b/setup.py
@@ -45,10 +45,13 @@ def run(self):
         'Programming Language :: Python',
         'Programming Language :: Python :: 3.11',
     ],
-    scripts=[
-        'mdtf_framework.py'
-    ],
-    packages=packages,
+    packages=find_packages(),
+    include_package_data=True,
+    entry_points={
+        'console_scripts': [
+            'mdtf = mdtf_framework:main',
+        ],
+    },
     cmdclass={  # hook for post-install commands
         'develop': PostDevelopCommand,
         'install': PostInstallCommand
diff --git a/sites/NOAA_GFDL/gfdl.py b/sites/NOAA_GFDL/gfdl.py
index 701cb7133..7ab517724 100644
--- a/sites/NOAA_GFDL/gfdl.py
+++ b/sites/NOAA_GFDL/gfdl.py
@@ -8,9 +8,8 @@
 import shutil
 import tempfile
 import pandas as pd
-from src import (util, core, diagnostic, data_manager, data_sources,
+from src import (util, pod_setup, data_manager, data_sources,
     preprocessor, environment_manager, output_manager, cmip6)
-from src import query_fetch_preprocess as qfp
 from sites.NOAA_GFDL import gfdl_util
 
 import logging
@@ -21,114 +20,107 @@ class GFDLMDTFFramework(core.MDTFFramework):
     def parse_mdtf_args(self, cli_obj, 
pod_info_tuple): super(GFDLMDTFFramework, self).parse_mdtf_args(cli_obj, pod_info_tuple) - self.dry_run = cli_obj.config.get('dry_run', False) - self.timeout = cli_obj.config.get('file_transfer_timeout', 0) + # set up cooperative mode -- hack to pass config settings self.frepp_mode = cli_obj.config.get('frepp', False) if self.frepp_mode: cli_obj.config['diagnostic'] = 'Gfdl' - def parse_env_vars(self, cli_obj): - super(GFDLMDTFFramework, self).parse_env_vars(cli_obj) - # set temp directory according to where we're running - if gfdl_util.running_on_PPAN(): - gfdl_tmp_dir = cli_obj.config.get('GFDL_PPAN_TEMP', '$TMPDIR') - else: - gfdl_tmp_dir = cli_obj.config.get('GFDL_WS_TEMP', '$TMPDIR') - gfdl_tmp_dir = util.resolve_path( - gfdl_tmp_dir, root_path=self.code_root, env=self.global_env_vars, - log=_log - ) - if not os.path.isdir(gfdl_tmp_dir): - gfdl_util.make_remote_dir(gfdl_tmp_dir, log=_log) - tempfile.tempdir = gfdl_tmp_dir - os.environ['MDTF_TMPDIR'] = gfdl_tmp_dir - self.global_env_vars['MDTF_TMPDIR'] = gfdl_tmp_dir - def _post_parse_hook(self, cli_obj, config, paths): - # call parent class method - super(GFDLMDTFFramework, self)._post_parse_hook(cli_obj, config, paths) - - self.reset_case_pod_list(cli_obj, config, paths) - # copy obs data from site install - gfdl_util.fetch_obs_data( - paths.OBS_DATA_REMOTE, paths.OBS_DATA_ROOT, - timeout=self.timeout, dry_run=self.dry_run, log=_log - ) - - def reset_case_pod_list(self, cli_obj, config, paths): - if self.frepp_mode: - for case in self.iter_children(): - # frepp mode:only attempt PODs other instances haven't already done - case_outdir = paths.modelPaths(case, overwrite=True) - case_outdir = case_outdir.MODEL_OUT_DIR - pod_list = case['pod_list'] - for p in pod_list: - if os.path.isdir(os.path.join(case_outdir, p)): - case.log.info(("\tPreexisting {} in {}; " - "skipping b/c frepp mode").format(p, case_outdir)) - case['pod_list'] = [p for p in pod_list if not \ - os.path.isdir(os.path.join(case_outdir, p)) - ] - - def verify_paths(self, config, p): - keep_temp = config.get('keep_temp', False) - # clean out WORKING_DIR if we're not keeping temp files: - if os.path.exists(p.WORKING_DIR) and not \ - (keep_temp or p.WORKING_DIR == p.OUTPUT_DIR): - gfdl_util.rmtree_wrapper(p.WORKING_DIR) - - try: - for dir_name, create_ in ( - ('CODE_ROOT', False), ('OBS_DATA_REMOTE', False), - ('OBS_DATA_ROOT', True), ('MODEL_DATA_ROOT', True), ('WORKING_DIR', True) - ): - util.check_dir(p, dir_name, create=create_) - except Exception as exc: - _log.fatal((f"Input settings for {dir_name} mis-specified (caught " - f"{repr(exc)}.)")) - util.exit_handler(code=1) - - # Use GCP to create OUTPUT_DIR on a volume that may be read-only - if not os.path.exists(p.OUTPUT_DIR): - gfdl_util.make_remote_dir(p.OUTPUT_DIR, self.timeout, self.dry_run, - log=_log) # ==================================================================== -@util.mdtf_dataclass -class GfdlDiagnostic(diagnostic.Diagnostic): +class GfdlPodObject(pod_setup.PodObject): """Wrapper for Diagnostic that adds writing a placeholder directory (POD_OUT_DIR) to the output as a lockfile if we're running in frepp cooperative mode. """ # extra dataclass fields _has_placeholder: bool = False + frepp_mode: bool = False + timeout: int = 0 - def pre_run_setup(self): + def __init__(self, name: str, runtime_config: util.NameSpace): """Extra code only applicable in frepp cooperative mode. If this code is called, all the POD's model data has been generated. 
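        The placeholder is, in effect, a lockfile. A minimal sketch of the
        pattern, with hypothetical names rather than this class's actual
        attributes::

            import os

            def claim_pod_output(pod_out_dir):
                # os.makedirs() raises FileExistsError if the directory already
                # exists, so a concurrent invocation loses the race and backs
                # off instead of clobbering another run's output.
                try:
                    os.makedirs(pod_out_dir)
                    return True
                except FileExistsError:
                    return False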
Write a placeholder directory to POD_OUT_DIR, so if frepp
        invokes the MDTF package again while we're running, only our results
        will be written to the overall output.
        """
-        super(GfdlDiagnostic, self).pre_run_setup()
+        super(GfdlPodObject, self).__init__(name, runtime_config)
 
-        config = core.ConfigManager()
-        frepp_mode = config.get('frepp', False)
-        if frepp_mode and not os.path.exists(self.POD_OUT_DIR):
+        self.frepp_mode = runtime_config.get('frepp', False)
+        self.timeout = runtime_config.get('file_transfer_timeout', 0)
+        if self.frepp_mode and not os.path.exists(self.POD_OUT_DIR):
             try:
                 gfdl_util.make_remote_dir(self.POD_OUT_DIR, log=self.log)
                 self._has_placeholder = True
             except Exception as exc:
                 chained_exc = util.chain_exc(exc, (f"Making output directory at "
-                    f"{self.POD_OUT_DIR}."), util.PodRuntimeError)
+                                                   f"{self.POD_OUT_DIR}."), util.PodRuntimeError)
                 self.deactivate(chained_exc)
 
+    def reset_case_pod_list(self, runtime_config):
+        if self.frepp_mode:
+            for case in self.iter_children():
+                # frepp mode: only attempt PODs other instances haven't already done
+                case_outdir = self.paths.modelPaths(case, overwrite=True)
+                case_outdir = case_outdir.MODEL_OUT_DIR
+
+    def verify_paths(self, config):
+        keep_temp = config.get('keep_temp', False)
+        # clean out WORK_DIR if we're not keeping temp files:
+        if os.path.exists(self.WORK_DIR) and not \
+                (keep_temp or self.WORK_DIR == self.OUTPUT_DIR):
+            gfdl_util.rmtree_wrapper(self.WORK_DIR)
+
+        try:
+            for dir_name, create_ in (
+                ('CODE_ROOT', False), ('OBS_DATA_REMOTE', False),
+                ('OBS_DATA_ROOT', True), ('MODEL_DATA_ROOT', True), ('WORK_DIR', True)
+            ):
+                util.check_dir(self.paths, dir_name, create=create_)
+        except Exception as exc:
+            _log.fatal((f"Input settings for {dir_name} mis-specified (caught "
+                        f"{repr(exc)}.)"))
+            util.exit_handler(code=1)
+
+        # Use GCP to create OUTPUT_DIR on a volume that may be read-only
+        if not os.path.exists(self.paths.OUTPUT_DIR):
+            gfdl_util.make_remote_dir(self.paths.OUTPUT_DIR, self.timeout, log=_log)
+
+    def parse_env_vars(self, runtime_config: util.NameSpace):
+        # set temp directory according to where we're running
+        if gfdl_util.running_on_PPAN():
+            gfdl_tmp_dir = runtime_config.get('GFDL_PPAN_TEMP', '$TMPDIR')
+        else:
+            gfdl_tmp_dir = runtime_config.get('GFDL_WS_TEMP', '$TMPDIR')
+        gfdl_tmp_dir = util.resolve_path(
+            gfdl_tmp_dir, root_path=self.code_root, env=self.global_env_vars,
+            log=_log
+        )
+        if not os.path.isdir(gfdl_tmp_dir):
+            gfdl_util.make_remote_dir(gfdl_tmp_dir, log=_log)
+        tempfile.tempdir = gfdl_tmp_dir
+        os.environ['MDTF_TMPDIR'] = gfdl_tmp_dir
+        self.global_env_vars['MDTF_TMPDIR'] = gfdl_tmp_dir
+
+    def _post_parse_hook(self, config, paths):
+        # call parent class method
+
+        self.reset_case_pod_list(config)
+        # copy obs data from site install
+        gfdl_util.fetch_obs_data(
+            self.paths.OBS_DATA_REMOTE, self.paths.OBS_DATA_ROOT,
+            timeout=self.timeout, log=_log
+        )
+
+
 # ------------------------------------------------------------------------
 
-class GCPFetchMixin(qfp.AbstractFetchMixin):
+class GCPFetchMixin:
     """Mixin implementing data fetch for netcdf files on filesystems accessible
     from GFDL via GCP. Remote files are copied to a local temp directory. dmgets
     are issued for remote files on tape filesystems.
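    As a rough sketch, the batched staging step amounts to the following
    (hypothetical helper using :py:mod:`subprocess` directly, rather than the
    package's ``util.run_command`` wrapper)::

        import subprocess

        def stage_from_tape(paths, per_file_timeout):
            # Issue a single dmget for the whole batch so the tape system can
            # order retrievals efficiently; -t and -v mirror the flags passed
            # in pre_fetch_hook() below.
            subprocess.run(['dmget', '-t', '-v'] + sorted(paths),
                           check=True, timeout=len(paths) * per_file_timeout)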
@@ -148,18 +140,17 @@ def pre_fetch_hook(self, vars_to_fetch): if self.tape_filesystem: paths = set([]) for var in vars_to_fetch: - for d_key in var.iter_data_keys(status=core.ObjectStatus.ACTIVE): + for d_key in var.iter_data_keys(status=util.ObjectStatus.ACTIVE): paths.update(d_key.remote_data()) for d_key in var.iter_associated_files_keys( - status=core.ObjectStatus.ACTIVE + status=util.ObjectStatus.ACTIVE ): paths.update(d_key.remote_data()) self.log.info(f"Start dmget of {len(paths)} files...") util.run_command(['dmget','-t','-v'] + list(paths), - timeout= len(paths) * self.timeout, - dry_run=self.dry_run, log=self.log - ) + timeout=len(paths) * self.timeout, log=self.log + ) self.log.info("Successful exit of dmget.") def _get_fetch_method(self, method=None): @@ -182,7 +173,7 @@ def fetch_dataset(self, var, d_key): """Copy files to temporary directory. (GCP can't copy to home dir, so always copy to a temp dir) """ - tmpdir = core.TempDirManager().make_tempdir() + tmpdir = util.TempDirManager().make_tempdir() self.log.debug("Created GCP fetch temp dir at %s.", tmpdir) (cp_command, smartsite) = self._get_fetch_method(self._fetch_method) @@ -208,29 +199,28 @@ def fetch_dataset(self, var, d_key): d_key.local_data = local_paths -class GFDL_GCP_FileDataSourceBase( - qfp.OnTheFlyDirectoryHierarchyQueryMixin, - GCPFetchMixin, - data_manager.DataframeQueryDataSourceBase -): - """Base class for DataSources that access data on GFDL's internal filesystems +class GFDL_GCP_FileDataSourceBase(ABC): + """Base class for multirun DataSources that access data on GFDL's internal filesystems using GCP, and which may be invoked via frepp. """ - _DiagnosticClass = GfdlDiagnostic - _PreprocessorClass = preprocessor.DefaultPreprocessor - - _FileRegexClass = util.abstract_attribute() - _DirectoryRegex = util.abstract_attribute() - _AttributesClass = util.abstract_attribute() - _fetch_method = 'auto' # symlink if not on /archive, else gcp + _DiagnosticClass = MultirunGfdlDiagnostic + _PreprocessorClass = preprocessor.MultirunDefaultPreprocessor def __init__(self, case_dict, parent): - self.catalog = None - super(GFDL_GCP_FileDataSourceBase, self).__init__(case_dict, parent) - + super(Multirun_GFDL_GCP_FileDataSourceBase, self).__init__(case_dict, parent) + # borrow MDTFObjectBase initialization from data_manager:~DataSourceBase + core.MDTFObjectBase.__init__( + self, name=case_dict['CASENAME'], _parent=parent + ) + # default behavior when run interactively: + # frepp_mode = False, any_components = True + # default behavior when invoked by FRE wrapper: + # frepp_mode = True (set to False by calling wrapper with --run_once) + # any_components = True (set to False with --component_only) config = core.ConfigManager() self.frepp_mode = config.get('frepp', False) self.dry_run = config.get('dry_run', False) + self.any_components = config.get('any_components', False) self.timeout = config.get('file_transfer_timeout', 0) if self.frepp_mode: @@ -244,10 +234,11 @@ def __init__(self, case_dict, parent): self.MODEL_WK_DIR = d.MODEL_WK_DIR self.MODEL_OUT_DIR = d.MODEL_OUT_DIR - @abc.abstractmethod - def query_associated_files(self, d_key): - """abstract method for querying dataframe for associated files""" - pass + @property + def _children(self): + """Iterable of the multirun varlist that is associated with the data source object + """ + yield from self.varlist.iter_vars() @util.mdtf_dataclass @@ -460,12 +451,6 @@ class GfdlppDataManager(GFDL_GCP_FileDataSourceBase): _DirectoryRegex = pp_dir_regex _AttributesClass = 
PPDataSourceAttributes - @property - def col_spec(self): - if self.any_components: - return gfdlppDataManager_any_components_col_spec - else: - return gfdlppDataManager_same_components_col_spec # map "name" field in VarlistEntry's query_attrs() to "variable" field of # PPTimeseriesDataFile @@ -482,15 +467,6 @@ def __init__(self, case_dict, parent): self.frepp_mode = config.get('frepp', False) self.any_components = config.get('any_components', False) - @property - def pod_expt_key_cols(self): - """Catalog columns whose values must be the same for each POD, but can - differ for different PODs. - """ - if self.frepp_mode and not self.any_components: - return 'component' - else: - return tuple() def query_associated_files(self, d_key): """Infers static file from variable's component and assigns data key @@ -685,65 +661,6 @@ def __new__(cls, case_dict, parent, *args, **kwargs): def __init__(self, *args, **kwargs): pass -# ------------------------------------------------------------------------ - -class GfdlvirtualenvEnvironmentManager( - environment_manager.VirtualenvEnvironmentManager - ): - # Use module files to switch execution environments, as defined on - # GFDL workstations and PP/AN cluster. - - def __init__(self, log=_log): - _ = gfdl_util.ModuleManager() - super(GfdlvirtualenvEnvironmentManager, self).__init__(log=log) - - # TODO: manual-coded logic like this is not scalable - def set_pod_env(self, pod): - langs = [s.lower() for s in pod.runtime_requirements] - if pod.name == 'convective_transition_diag': - pod.env = 'py_convective_transition_diag' - elif pod.name == 'MJO_suite': - pod.env = 'ncl_MJO_suite' - elif ('r' in langs) or ('rscript' in langs): - pod.env = 'r_default' - elif 'ncl' in langs: - pod.env = 'ncl' - else: - pod.env = 'py_default' - - # TODO: manual-coded logic like this is not scalable - _module_lookup = { - 'ncl': ['ncl'], - 'r_default': ['r'], - 'py_default': ['python'], - 'py_convective_transition_diag': ['python', 'ncl'], - 'ncl_MJO_suite': ['python', 'ncl'] - } - - def create_environment(self, env_name): - modMgr = gfdl_util.ModuleManager() - modMgr.load(self._module_lookup[env_name]) - super(GfdlvirtualenvEnvironmentManager, \ - self).create_environment(env_name) - - def activate_env_commands(self, env_name): - modMgr = gfdl_util.ModuleManager() - mod_list = modMgr.load_commands(self._module_lookup[env_name]) - return ['source $MODULESHOME/init/bash'] \ - + mod_list \ - + super(GfdlvirtualenvEnvironmentManager, self).activate_env_commands(env_name) - - def deactivate_env_commands(self, env_name): - modMgr = gfdl_util.ModuleManager() - mod_list = modMgr.unload_commands(self._module_lookup[env_name]) - return super(GfdlvirtualenvEnvironmentManager, \ - self).deactivate_env_commands(env_name) + mod_list - - def tear_down(self): - super(GfdlvirtualenvEnvironmentManager, self).tear_down() - modMgr = gfdl_util.ModuleManager() - modMgr.revert_state() - class GfdlcondaEnvironmentManager(environment_manager.CondaEnvironmentManager): # Use miniconda3 in the mdtf role account def _call_conda_create(self, env_name): @@ -753,130 +670,4 @@ def _call_conda_create(self, env_name): # ------------------------------------------------------------------------ -class GFDLHTMLPodOutputManager(output_manager.HTMLPodOutputManager): - def __init__(self, pod, output_mgr): - super(GFDLHTMLPodOutputManager, self).__init__(pod, output_mgr) - config = core.ConfigManager() - self.frepp_mode = config.get('frepp', False) - - def make_output(self): - """Only run output steps (including logging 
error on index.html) - if POD ran on this invocation. - """ - if not self.frepp_mode: - super(GFDLHTMLPodOutputManager, self).make_output() - elif getattr(self.obj, '_has_placeholder', False): - self.obj.log.debug('POD %s has frepp placeholder, generating output.', - self.obj.name) - super(GFDLHTMLPodOutputManager, self).make_output() - else: - self.obj.log.debug(('POD %s does not have frepp placeholder; not ' - 'generating output.'), self.obj.name) - -class GFDLHTMLOutputManager(output_manager.HTMLOutputManager): - _PodOutputManagerClass = GFDLHTMLPodOutputManager - - def __init__(self, case): - config = core.ConfigManager() - try: - self.frepp_mode = config.get('frepp', False) - self.dry_run = config.get('dry_run', False) - self.timeout = config.get('file_transfer_timeout', 0) - except (AttributeError, KeyError) as exc: - case.log.store_exception(exc) - - super(GFDLHTMLOutputManager, self).__init__(case) - - def make_html(self, cleanup=False): - """Never cleanup html if we're in frepp_mode, since framework may run - later when another component finishes. Instead just append current - progress to CASE_TEMP_HTML. - """ - prev_html = os.path.join(self.OUT_DIR, self._html_file_name) - if self.frepp_mode and os.path.exists(prev_html): - self.obj.log.debug("Found previous HTML at %s; appending.", self.OUT_DIR) - with io.open(prev_html, 'r', encoding='utf-8') as f1: - contents = f1.read() - contents = contents.split('') - assert len(contents) == 3 - contents = contents[1] - - if os.path.exists(self.CASE_TEMP_HTML): - mode = 'a' - else: - self.obj.log.warning("No file at %s.", self.CASE_TEMP_HTML) - mode = 'w' - with io.open(self.CASE_TEMP_HTML, mode, encoding='utf-8') as f2: - f2.write(contents) - super(GFDLHTMLOutputManager, self).make_html( - cleanup=(not self.frepp_mode) - ) - @property - def _tarball_file_path(self): - paths = core.PathManager() - assert hasattr(self, 'WK_DIR') - file_name = self.WK_DIR + '.tar' - return os.path.join(paths.WORKING_DIR, file_name) - - def make_tar_file(self): - """Make the tar file locally in WK_DIR and gcp to destination, - since OUT_DIR might be mounted read-only. - """ - paths = core.PathManager() - out_path = super(GFDLHTMLOutputManager, self).make_tar_file() - _, file_name = os.path.split(out_path) - tar_dest_path = os.path.join(paths.OUTPUT_DIR, file_name) - gfdl_util.gcp_wrapper(out_path, tar_dest_path, log=self.obj.log) - return tar_dest_path - - def copy_to_output(self): - """Use gcp for transfer, since OUTPUT_DIR might be mounted read-only. - Also has special logic to handle frepp_mode. 
- """ - if self.WK_DIR == self.OUT_DIR: - return # no copying needed - if self.frepp_mode: - # only copy PODs that ran, whether they succeeded or not - for pod in self.obj.iter_children(): - if pod._has_placeholder: - gfdl_util.gcp_wrapper( - pod.POD_WK_DIR, pod.POD_OUT_DIR, log=pod.log - ) - # copy all case-level files - self.obj.log.debug("Copying case-level files in %s", self.WK_DIR) - for f in os.listdir(self.WK_DIR): - if os.path.isfile(os.path.join(self.WK_DIR, f)): - self.obj.log.debug("Found case-level file %s", f) - gfdl_util.gcp_wrapper( - os.path.join(self.WK_DIR, f), self.OUT_DIR, log=self.obj.log - ) - else: - # copy everything at once - if os.path.exists(self.OUT_DIR): - if self.overwrite: - try: - self.obj.log.error('%s exists, attempting to remove.', self.OUT_DIR) - gfdl_util.rmtree_wrapper(self.OUT_DIR) - except OSError: - # gcp will not overwrite dirs, so forced to save under - # a different name despite overwrite=True - self.obj.log.error(("Couldn't remove %s (probably mounted read" - "-only); will rename new directory."), self.OUT_DIR) - else: - self.obj.log.error("%s exists; will rename new directory.", self.OUT_DIR) - try: - if os.path.exists(self.OUT_DIR): - # check again, since rmtree() might have succeeded - self.OUT_DIR, version = \ - util.bump_version(self.OUT_DIR) - new_wkdir, _ = \ - util.bump_version(self.WK_DIR, new_v=version) - self.obj.log.debug("Move %s to %s", self.WK_DIR, new_wkdir) - shutil.move(self.WK_DIR, new_wkdir) - self.WK_DIR = new_wkdir - gfdl_util.gcp_wrapper(self.WK_DIR, self.OUT_DIR, log=self.obj.log) - except Exception: - raise # only delete MODEL_WK_DIR if copied successfully - self.obj.log.debug('Transfer succeeded; deleting directory %s', self.WK_DIR) - gfdl_util.rmtree_wrapper(self.WK_DIR) diff --git a/sites/NOAA_GFDL/gfdl_multirun.py b/sites/NOAA_GFDL/gfdl_multirun.py index 4e94bc964..be36046ca 100644 --- a/sites/NOAA_GFDL/gfdl_multirun.py +++ b/sites/NOAA_GFDL/gfdl_multirun.py @@ -42,49 +42,6 @@ def pre_run_setup(self): f"{self.POD_OUT_DIR}."), util.PodRuntimeError) self.deactivate(chained_exc) -class Multirun_GFDL_GCP_FileDataSourceBase(multirun.MultirunDataframeQueryDataSourceBase, - qfp.MultirunDataSourceQFPMixin, - gfdl.GFDL_GCP_FileDataSourceBase, - ABC): - """Base class for multirun DataSources that access data on GFDL's internal filesystems - using GCP, and which may be invoked via frepp. 
- """ - _DiagnosticClass = MultirunGfdlDiagnostic - _PreprocessorClass = preprocessor.MultirunDefaultPreprocessor - - def __init__(self, case_dict, parent): - super(Multirun_GFDL_GCP_FileDataSourceBase, self).__init__(case_dict, parent) - # borrow MDTFObjectBase initialization from data_manager:~DataSourceBase - core.MDTFObjectBase.__init__( - self, name=case_dict['CASENAME'], _parent=parent - ) - # default behavior when run interactively: - # frepp_mode = False, any_components = True - # default behavior when invoked by FRE wrapper: - # frepp_mode = True (set to False by calling wrapper with --run_once) - # any_components = True (set to False with --component_only) - config = core.ConfigManager() - self.frepp_mode = config.get('frepp', False) - self.dry_run = config.get('dry_run', False) - self.any_components = config.get('any_components', False) - self.timeout = config.get('file_transfer_timeout', 0) - - if self.frepp_mode: - paths = core.PathManager() - self.overwrite = True - # flag to not overwrite config and .tar: want overwrite for frepp - self.file_overwrite = True - # if overwrite=False, WK_DIR & OUT_DIR will have been set to a - # unique name in parent's init. Set it back so it will be overwritten. - d = paths.model_paths(self, overwrite=True) - self.MODEL_WK_DIR = d.MODEL_WK_DIR - self.MODEL_OUT_DIR = d.MODEL_OUT_DIR - - @property - def _children(self): - """Iterable of the multirun varlist that is associated with the data source object - """ - yield from self.varlist.iter_vars() class MultirunGfdlarchivecmip6DataManager(Multirun_GFDL_GCP_FileDataSourceBase, gfdl.Gfdlarchivecmip6DataManager, ABC): diff --git a/src/cli.py b/src/cli.py index d352c05ce..839eea83e 100644 --- a/src/cli.py +++ b/src/cli.py @@ -4,1403 +4,159 @@ Familiarity with the python :py:mod:`argparse` module is recommended. """ import os -import sys -import io -import argparse -import collections -import dataclasses -import importlib -import itertools -import json -import operator -import shlex -import re -import textwrap -import typing +import pathlib +import yaml +from datetime import datetime from src import util import logging _log = logging.getLogger(__name__) -_SCRIPT_NAME = 'mdtf.py' # mimic argparse error message text - -def canonical_arg_name(str_): - """Convert a flag or other specification to a destination variable name. - The destination variable name always has underscores, never hyphens, in - accordance with PEP8. - - E.g., ``canonical_arg_name('--GNU-style-flag')`` returns "GNU_style_flag". - """ - return str_.lstrip('-').rstrip().replace('-', '_') - - -def plugin_key(plugin_name): - """Convert user input for plugin options to string used to lookup plugin - value from options defined in cli_plugins.jsonc files. - - Ignores spaces and underscores in supplied choices for CLI plugins, and - make matching of plugin names case-insensititve. - """ - return re.sub(r"[\s_]+", "", plugin_name).lower() - - -def word_wrap(str_): - """Clean whitespace and perform 80-column word wrapping for multi-line help - and description strings. Explicit paragraph breaks must be encoded as a - double newline \(``\\n\\n``\). 
- """ - paragraphs = textwrap.dedent(str_).split('\n\n') - paragraphs = [re.sub(r'\s+', ' ', s).strip() for s in paragraphs] - paragraphs = [textwrap.fill(s, width=80) for s in paragraphs] - return '\n\n'.join(paragraphs) - - -def read_config_files(code_root, file_name, site=""): - """Utility function to read a pair of configuration files: one for the - framework defaults, another optional one for site-specific configuration. - - Args: - code_root (str): Code repo directory. - file_name (str): Name of file to search for. We search for the file - in all subdirectories of :meth:`CLIConfigManager.site_dir` - and :meth:`CLIConfigManager.framework_dir`, respectively. - site (str): Name of the site-specific directory (in ``/sites``) to search. - - Returns: - A tuple of the two files' contents. First element is the - site specific file (empty dict if that file isn't found) and second - is the framework file (if not found, fatal error and exit immediately.) - """ - src_dir = os.path.join(code_root, 'src') - site_dir = os.path.join(code_root, 'sites', site) - site_d = util.find_json(site_dir, file_name, exit_if_missing=False, log=_log) - fmwk_d = util.find_json(src_dir, file_name, exit_if_missing=True, log=_log) - return site_d, fmwk_d - - -def read_config_file(code_root, file_name, site=""): +def read_config_file(code_root: str, file_dir: str, file_name: str) -> str: """Return the site's config file if present, else the framework's file. Wraps :func:`read_config_files`. Args: code_root (str): Code repo directory. - file_name (str): Name of file to search for. We search for the file - in all subdirectories of :meth:`CLIConfigManager.site_dir` - and :meth:`CLIConfigManager.framework_dir`, respectively. - site (str): Name of the site-specific directory (in ``/sites``) to search. + file_dir (str): subdirectory name or path in code_root that contains target file + file_name (str): Name of file to search for. Returns: Path to the configuration file. """ - site_d, fmwk_d = read_config_files(code_root, file_name, site=site) - if not site_d: - return fmwk_d - return site_d - - -class CustomHelpFormatter( - argparse.RawDescriptionHelpFormatter, - argparse.ArgumentDefaultsHelpFormatter -): - """Modify help text formatter to only display variable placeholder text - ("metavar") once, to save space. Taken from - ``__. Also inherit from - :py:class:`argparse.RawDescriptionHelpFormatter` in order to preserve line - breaks in description only (``__). - """ - - def __init__(self, *args, **kwargs): - # tweak indentation of help strings - if not kwargs.get('indent_increment', None): - kwargs['indent_increment'] = 2 - if not kwargs.get('max_help_position', None): - kwargs['max_help_position'] = 6 - super(CustomHelpFormatter, self).__init__(*args, **kwargs) - - def _format_action_invocation(self, action): - if not action.option_strings: - metavar, = self._metavar_formatter(action, action.dest)(1) - return metavar - else: - parts = [] - if action.nargs == 0: - # if the Optional doesn't take a value, format is: "-s, --long" - parts.extend(action.option_strings) - else: - # if the Optional takes a value, format is: "-s ARGS, --long ARGS" - default = action.dest.upper() - args_string = self._format_args(action, default) - if args_string[0].isalpha(): - args_string = '<' + args_string + '>' - parts.extend(action.option_strings) - parts[-1] += ' %s' % args_string - return ', '.join(parts) - - def _get_help_string(self, action): - """Only print the argument's default in the help string if it's defined. - Based on ``__. 
- """ - help_str = action.help - if help_str == argparse.SUPPRESS: - # ignore hidden CLI items - return help_str - if action.default not in (None, argparse.SUPPRESS) \ - and '%(default)' not in help_str: - defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE] - if action.option_strings or action.nargs in defaulting_nargs: - if isinstance(action.default, str): - help_str += " (default: '%(default)s')" - else: - help_str += " (default: %(default)s)" - return help_str - - -class RecordDefaultsAction(argparse.Action): - """Argparse :py:class:`~argparse.Action` that adds a boolean to record if user - actually set the argument's value, or if we're using the default value specified - in the parser. From ``__. This also - re-implements the 'store_true' and 'store_false' actions, in order to give - defaults information on boolean flags. - - If the user specifies a value for an option named ``
-<font color=navy>Time period, {{FIRSTYR}}-{{LASTYR}}
+<font color=navy>Time period, {{startdate}}-{{enddate}}
 {{CASENAME}}
 OBS
"\ + "{{CASENAME}}\n" + for case_name, case_settings in case_info.items(): + case_settings['PODNAME'] = template_dict['PODNAME'] + output_template = util._DoubleBraceTemplate(case_template).safe_substitute(case_settings) + dest_file_handle.write(output_template) + + def append_case_info_html(self, case_info: dict, dest_file_handle: io.TextIOWrapper): + """append_case_info_html: append case figures to the POD html template + + Arguments: case_info (nested dict): dictionary with information for each case + dest_file_handle (io.TextIO): output html file io stream + """ + + case_settings_header_html_template = """
+ Case Settings
+        """
+
+        dest_file_handle.write(case_settings_header_html_template)
+
+        # write the settings per case. First header.
+        # This prints the whole html_template = str(case_dict)
+
+        case_settings_template = """
+ {{CASENAME}}\n\
+            Date Range: {{startdate}} - {{enddate}}\n
+        """
+
+        for case_name, case_settings in case_info.items():
+            output_template = util._DoubleBraceTemplate(case_settings_template).safe_substitute(case_settings)
+            dest_file_handle.write(output_template)
+
+        pod_settings_header_html_template = """
+ POD Settings\n
+
Driver script: {{driver}}\n + """ + output_template = ( + util._DoubleBraceTemplate(pod_settings_header_html_template).safe_substitute(self.obj.pod_settings)) + dest_file_handle.write(output_template) + + def make_html(self, html_file_name: str, cleanup=True): """Add header and footer to the temporary output file at CASE_TEMP_HTML. """ - dest = os.path.join(self.WK_DIR, self._html_file_name) + dest = os.path.join(self.WORK_DIR, html_file_name) if os.path.isfile(dest): self.obj.log.warning("%s: '%s' exists, deleting.", - self._html_file_name, self.obj.name) + html_file_name, self.obj.name) os.remove(dest) - template_dict = self.obj.env_vars.copy() + template_dict = self.obj.pod_env_vars.copy() template_dict['DATE_TIME'] = \ datetime.datetime.utcnow().strftime("%A, %d %B %Y %I:%M%p (UTC)") + template_dict['PODNAME'] = self.obj.name util.append_html_template( - self.html_src_file('mdtf_header.html'), dest, template_dict + self.html_src_file('mdtf_multirun_header.html'), dest, template_dict ) util.append_html_template(self.CASE_TEMP_HTML, dest, {}) + with io.open(dest, 'a', encoding='utf-8') as f: + if self.multi_case_figure: + self.generate_html_file_case_loop(self.obj.multi_case_dict['CASE_LIST'], template_dict, f) + self.append_case_info_html(self.obj.multi_case_dict['CASE_LIST'], f) + f.close() util.append_html_template( self.html_src_file('mdtf_footer.html'), dest, template_dict ) if cleanup: os.remove(self.CASE_TEMP_HTML) - shutil.copy2(self.html_src_file('mdtf_diag_banner.png'), self.WK_DIR) + shutil.copy2(self.html_src_file('mdtf_diag_banner.png'), self.WORK_DIR) - def backup_config_files(self): + def backup_config_files(self, config): """Record user input configuration in a file named ``config_save.json`` for rerunning. """ - config = core.ConfigManager() for config_tup in config._configs.values(): if config_tup.backup_filename is None: continue - out_file = os.path.join(self.WK_DIR, config_tup.backup_filename) + out_file = os.path.join(self.WORK_DIR, config_tup.backup_filename) if not self.file_overwrite: out_file, _ = util.bump_version(out_file) elif os.path.exists(out_file): @@ -396,21 +481,20 @@ def make_tar_file(self): self.obj.log.info("%s: Creating '%s'.", self.obj.full_name, out_path) elif os.path.exists(out_path): self.obj.log.info("%s: Overwriting '%s'.", self.obj.full_name, out_path) - tar_flags = [f"--exclude=.{s}" for s in ('netCDF','nc','ps','PS','eps')] + tar_flags = [f"--exclude=.{s}" for s in ('netCDF', 'nc', 'ps', 'PS', 'eps')] tar_flags = ' '.join(tar_flags) util.run_shell_command( - f'tar {tar_flags} -czf {out_path} -C {self.WK_DIR} .', - dry_run = self.dry_run + [f'tar {tar_flags} -czf {out_path} -C {self.WORK_DIR} .'] ) return out_path def copy_to_output(self): """Copy all files to the user-specified output directory (``$OUTPUT_DIR``). """ - if self.WK_DIR == self.OUT_DIR: - return # no copying needed + if self.WORK_DIR == self.OUT_DIR: + return # no copying needed self.obj.log.debug("%s: Copy '%s' to '%s'.", self.obj.full_name, - self.WK_DIR, self.OUT_DIR) + self.WORK_DIR, self.OUT_DIR) try: if os.path.exists(self.OUT_DIR): if not self.overwrite: @@ -419,155 +503,28 @@ def copy_to_output(self): shutil.rmtree(self.OUT_DIR) except Exception: raise - shutil.move(self.WK_DIR, self.OUT_DIR) + shutil.move(self.WORK_DIR, self.OUT_DIR) - def make_output(self): - """Top-level method for doing all output activity post-init. Spun into a - separate method to make subclassing easier. 
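    For reference, the tarball step in ``make_tar_file()`` above reduces to a
    tar invocation with per-extension excludes; a standalone sketch with
    hypothetical paths, assuming glob-style exclude patterns::

        import subprocess

        def make_results_tar(work_dir, out_path):
            # Skip bulky intermediate files by extension, then archive the
            # whole working directory into a gzipped tarball.
            excludes = ['--exclude=*.' + s for s in ('netCDF', 'nc', 'ps', 'PS', 'eps')]
            subprocess.run(['tar', *excludes, '-czf', out_path, '-C', work_dir, '.'],
                           check=True)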
- """ - # create empty text file for PODs to append to; equivalent of 'touch' - open(self.CASE_TEMP_HTML, 'w').close() - for pod in self.obj.iter_children(): - try: - pod_output = self._PodOutputManagerClass(pod, self) - pod_output.make_output() - if not pod.failed: - self.verify_pod_links(pod) - except Exception as exc: - pod.deactivate(exc) - continue - for pod in self.obj.iter_children(): - try: - self.append_result_link(pod) - except Exception as exc: - # won't go into the html output, but will be present in the - # summary for the case - pod.deactivate(exc) - continue - pod.close_log_file(log=True) - if not pod.failed: - pod.status = core.ObjectStatus.SUCCEEDED - - self.make_html() - self.backup_config_files() - self.write_data_log_file() - if self.make_variab_tar: - _ = self.make_tar_file() - self.copy_to_output() - if not self.obj.failed \ - and not any(p.failed for p in self.obj.iter_children()): - self.obj.status = core.ObjectStatus.SUCCEEDED - - -class MultirunHTMLOutputManager(HTMLOutputManager, - AbstractOutputManager, - HTMLSourceFileMixin): - """OutputManager that collects the output of all PODs run in multirun mode - as html pages. - - Instantiates :class:`HTMLPodOutputManager` objects to handle processing the - output of each POD. - """ - _PodOutputManagerClass = HTMLPodOutputManager - _html_file_name = 'index.html' - - def __init__(self, pod): - config = core.ConfigManager() - try: - self.make_variab_tar = config['make_variab_tar'] - self.dry_run = config['dry_run'] - self.overwrite = config['overwrite'] - self.file_overwrite = self.overwrite # overwrite both config and .tar - except KeyError as exc: - self.log.exception("Caught %r", exc) - - self.CODE_ROOT = pod._parent.code_root - self.WK_DIR = pod.POD_WK_DIR # abbreviate - self.OUT_DIR = pod.POD_OUT_DIR # abbreviate - self.obj = pod - - def append_result_link(self, pod): - """Update the top level index.html page with a link to *pod*'s results. - - This simply appends one of two html fragments to index.html: - ``src/html/pod_result_snippet.html`` if *pod* completed successfully, - or ``src/html/pod_error_snippet.html`` if an exception was raised during - *pod*'s setup or execution. - """ - template_d = html_templating_dict(pod) - # add a warning banner if needed - assert(hasattr(pod, '_banner_log')) - banner_str = pod._banner_log.buffer_contents() - if banner_str: - banner_str = banner_str.replace('\n', '
\n') - src = self.html_src_file('warning_snippet.html') - template_d['MDTF_WARNING_BANNER_TEXT'] = banner_str - util.append_html_template(src, self.CASE_TEMP_HTML, template_d) - - # put in the link to results - if pod.failed: - # report error - src = self.html_src_file('pod_error_snippet.html') - # template_d['error_text'] = pod.format_log(children=True) - else: - # normal exit - src = self.html_src_file('multirun_pod_result_snippet.html') - util.append_html_template(src, self.CASE_TEMP_HTML, template_d) - - def make_output(self, pod): - """Top-level method for doing all output activity post-init. Spun into a - separate method to make subclassing easier. - """ - # create empty text file for PODs to append to; equivalent of 'touch' - open(self.CASE_TEMP_HTML, 'w').close() - try: - pod_output = self._PodOutputManagerClass(pod, self) - pod_output.make_output() - if not pod.failed: - self.verify_pod_links(pod) - except Exception as exc: - pod.deactivate(exc) - try: - self.append_result_link(pod) # problems here - except Exception as exc: - # won't go into the html output, but will be present in the - # summary for the case - pod.deactivate(exc) - pod.close_log_file(log=True) - if not pod.failed: - pod.status = core.ObjectStatus.SUCCEEDED - - self.make_html() - self.backup_config_files() - self.write_data_log_file() - if self.make_variab_tar: - _ = self.make_tar_file() - self.copy_to_output() - if not self.obj.failed \ - and not any(p.failed for p in self.obj.iter_children()): - self.obj.status = core.ObjectStatus.SUCCEEDED + def verify_pod_links(self, pod): + """Check for missing files linked to from POD's html page. - def make_html(self, cleanup=True): - """Add header and footer to the temporary output file at CASE_TEMP_HTML. + See documentation for :class:`~src.verify_links.LinkVerifier`. This method + calls :class:`~src.verify_links.LinkVerifier` to check existence of all + files linked to from the POD's own top-level html page (after templating). + If any files are missing, an error message listing them is written to + the run's ``index.html`` page (located in ``src/html/pod_missing_snippet.html``). 
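        As a simplified sketch of what link verification involves (hypothetical
        helper, far less thorough than :class:`~src.verify_links.LinkVerifier`)::

            import os
            import re

            def missing_targets(html_path, root_dir):
                # Collect relative href/src targets from the page and report
                # those that don't exist on disk under root_dir.
                with open(html_path, encoding='utf-8') as f:
                    targets = re.findall(r'(?:href|src)="([^"#]+)"', f.read())
                return [t for t in targets
                        if not t.startswith(('http://', 'https://'))
                        and not os.path.exists(os.path.join(root_dir, t))]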
""" - dest = os.path.join(self.WK_DIR, self._html_file_name) - if os.path.isfile(dest): - self.obj.log.warning("%s: '%s' exists, deleting.", - self._html_file_name, self.obj.name) - os.remove(dest) - - template_dict = self.obj.pod_env_vars.copy() - template_dict['DATE_TIME'] = \ - datetime.datetime.utcnow().strftime("%A, %d %B %Y %I:%M%p (UTC)") - template_dict['PODNAME'] = self.obj.name - util.append_html_template( - self.html_src_file('mdtf_multirun_header.html'), dest, template_dict - ) - util.append_html_template(self.CASE_TEMP_HTML, dest, {}) - util.append_html_template( - self.html_src_file('mdtf_footer.html'), dest, template_dict + pod.log.info('Checking linked output files for %s.', pod.full_name) + verifier = verify_links.LinkVerifier( + self.pod_html(pod), # root html file to start search at + self.WORK_DIR, # root directory to resolve relative paths + verbose=False, + log=pod.log ) - if cleanup: - os.remove(self.CASE_TEMP_HTML) - shutil.copy2(self.html_src_file('mdtf_diag_banner.png'), self.WK_DIR) - + missing_out = verifier.verify_pod_links(pod.name) + if missing_out: + pod.deactivate( + util.MDTFFileNotFoundError(f'Missing {len(missing_out)} files.') + ) + else: + pod.log.info('\tNo files are missing.') diff --git a/src/pod_setup.py b/src/pod_setup.py index 615287319..fa3db1ead 100644 --- a/src/pod_setup.py +++ b/src/pod_setup.py @@ -1,217 +1,310 @@ """Classes for POD setup routines previously located in data_manager.DataSourceBase """ -from abc import ABC import logging import os -from src import util, core, varlistentry_util, varlist_util +import io +from pathlib import Path +import subprocess + +from src import cli, util +import dataclasses as dc +from distutils.spawn import find_executable _log = logging.getLogger(__name__) -class PodSetupBaseClass(metaclass=util.MDTFABCMeta): +class PodBaseClass(metaclass=util.MDTFABCMeta): """Base class for POD setup methods """ - def setup_pod(self, pod): + + def parse_pod_settings_file(self, code_root: str): pass - def setup_var(self, pod, v): + def setup_pod(self, config: util.NameSpace, + model_paths: util.ModelDataPathManager, + case_list: dict): pass - def variable_dest_path(self): + def setup_var(self, pod, v): pass -class SingleRunPod(PodSetupBaseClass, ABC): +class PodObject(util.MDTFObjectBase, util.PODLoggerMixin, PodBaseClass): + """Class to hold pod information""" + # name: str Class atts inherited from MDTFObjectBase + # _id + # _parent: object + # status: ObjectStatus + pod_dims = dict() + pod_data = dict() + pod_vars = dict() + pod_settings = dict() + multi_case_dict = dict() # populated with case_info entries in enviroment_manager + overwrite: bool = False + # explict 'program' attribute in settings + _interpreters = dict + runtime_requirements: util.NameSpace + driver: str = "" + program: str = "" + pod_env_vars: util.ConsistentDict = dc.field(default_factory=util.ConsistentDict) + log_file: io.IOBase = dc.field(default=None, init=False) + nc_largefile: bool = False + bash_exec: str + global_env_vars: dict - def setup_pod(self, pod): - """Update POD with information that only becomes available after - DataManager and Diagnostic have been configured (ie, only known at - runtime, not from settings.jsonc.) 
+ def __init__(self, name: str, runtime_config: util.NameSpace): + self.name = name + self._id = None + # define global environment variables: those that apply to the entire POD + self.pod_env_vars = os.environ.copy() + self.pod_env_vars['RGB'] = os.path.join(runtime_config.CODE_ROOT, 'shared', 'rgb') + self.pod_env_vars['CONDA_ROOT'] = os.path.expandvars(runtime_config.conda_root) + if any(runtime_config.micromamba_exe): + self.pod_env_vars['MICROMAMBA_EXE'] = runtime_config.micromamba_exe + else: + self.pod_env_vars['MICROMAMBA_EXE'] = "" + # globally enforce non-interactive matplotlib backend + # see https://matplotlib.org/3.2.2/tutorials/introductory/usage.html#what-is-a-backend + self.pod_env_vars['MPLBACKEND'] = "Agg" + self._interpreters = {'.py': 'python', '.ncl': 'ncl', '.R': 'Rscript'} + self.nc_largefile = runtime_config.large_file + self.bash_exec = find_executable('bash') + # Initialize the POD path object and define the POD output paths + # Don't need a new working directory since one is created when the model data directories are initialized + self.paths = util.PodPathManager(runtime_config, + env=self.pod_env_vars, + new_work_dir=False) + self.paths.setup_pod_paths(self.name) + util.MDTFObjectBase.__init__(self, name=self.name, _parent=None) - Could arguably be moved into Diagnostic's init, at the cost of - dependency inversion. - """ - pod.setup(self) - for v in pod.iter_children(): - try: - self.setup_var(pod, v) - except Exception as exc: - chained_exc = util.chain_exc(exc, f"configuring {v.full_name}.", - util.PodConfigError) - v.deactivate(chained_exc) - continue - # preprocessor will edit varlist alternates, depending on enabled functions - pod.preprocessor = self._PreprocessorClass(self, pod) - pod.preprocessor.edit_request(self, pod) - - for v in pod.iter_children(): - # deactivate failed variables, now that alternates are fully - # specified - if v.last_exception is not None and not v.failed: - v.deactivate(v.last_exception, level=logging.WARNING) - if pod.status == core.ObjectStatus.NOTSET and \ - any(v.status == core.ObjectStatus.ACTIVE for v in pod.iter_children()): - pod.status = core.ObjectStatus.ACTIVE + # Explicitly invoke MDTFObjectBase post_init and init methods so that _id and other inherited + # attributes are initialized correctly. Calling super()__init__ causes and error in the _id definition + def __post_init__(self, *args, **kwargs): + util.MDTFObjectBase.__post_init__(self) + # set up log (PODLoggerMixin) + self.init_log(log_dir=self.paths.POD_WORK_DIR) - def setup_var(self, pod, v): - """Update VarlistEntry fields with information that only becomes - available after DataManager and Diagnostic have been configured (ie, - only known at runtime, not from settings.jsonc.) + @property + def failed(self): + return self.status == util.ObjectStatus.FAILED + + @property + def active(self): + return self.status == util.ObjectStatus.ACTIVE + + @property + def _log_name(self): + # POD loggers sit in a subtree of the DataSource logger distinct from + # the DataKey loggers; the two subtrees are distinguished by class name + _log_name = f"{self.name}_{self._id}".replace('.', '_') + return f"{self.__class__.__name__}.{_log_name}" + + @property + def _children(self): + # property required by MDTFObjectBase + return self.multi_case_dict.values() + + @property + def full_name(self): + return f"<#{self._id}:{self.name}>" - Could arguably be moved into VarlistEntry's init, at the cost of - dependency inversion. 
+ def close_log_file(self, log=True): + if self.log_file is not None: + if log: + self.log_file.write(self.format_log(children=False)) + self.log_file.close() + + def iter_case_names(self): + """Iterator returning :c """ - translate = core.VariableTranslator().get_convention(self.convention) - if v.T is not None: - v.change_coord( - 'T', - new_class={ - 'self': varlist_util.VarlistTimeCoordinate, - 'range': util.DateRange, - 'frequency': util.DateFrequency - }, - range=self.attrs.date_range, - calendar=util.NOTSET, - units=util.NOTSET - ) - v.dest_path = self.variable_dest_path(pod, v) + yield self.multi_case_dict.keys() + + def parse_pod_settings_file(self, code_root: str) -> util.NameSpace: + """Parse the POD settings file""" + settings_file_query = Path(code_root, 'diagnostics', self.name).glob('*settings.*') + settings_file_path = str([p for p in settings_file_query][0]) + # Use wildcard to support settings file in yaml and jsonc format + settings_dict = cli.parse_config_file(settings_file_path) + return util.NameSpace.fromDict({k: settings_dict[k] for k in settings_dict.keys()}) + + def verify_pod_settings(self): + """Verify that the POD settings file has the required entries""" + required_settings = {"driver": str, "long_name": "", "convention": "", + "runtime_requirements": list} + value = [] try: - trans_v = translate.translate(v) - v.translation = trans_v - # copy preferred gfdl post-processing component during translation - if hasattr(trans_v, "component"): - v.component = trans_v.component - if hasattr(trans_v,"rename_coords"): - v.rename_coords = trans_v.rename_coords - except KeyError as exc: - # can happen in normal operation (eg. precip flux vs. rate) - chained_exc = util.PodConfigEvent((f"Deactivating {v.full_name} due to " - f"variable name translation: {str(exc)}.")) - # store but don't deactivate, because preprocessor.edit_request() - # may supply alternate variables - v.log.store_exception(chained_exc) + value = [i for i in required_settings if i in self.pod_settings + and isinstance(self.pod_settings[i], type(required_settings[i]))] except Exception as exc: - chained_exc = util.chain_exc(exc, f"translating name of {v.full_name}.", - util.PodConfigError) - # store but don't deactivate, because preprocessor.edit_request() - # may supply alternate variables - v.log.store_exception(chained_exc) - - v.stage = varlistentry_util.VarlistEntryStage.INITED - - def variable_dest_path(self, pod, var): - """Returns the absolute path of the POD's preprocessed, local copy of - the file containing the requested dataset. Files not following this - convention won't be found by the POD. 
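A condensed sketch of the wildcard settings-file lookup used by `parse_pod_settings_file` above, for a hypothetical POD named `example_pod`; the `*settings.*` glob is what lets the framework accept either a `settings.jsonc` or a `settings.yml` file. Guarding against an empty match avoids the bare `[0]` index failing when no settings file exists.

```python
# Sketch of the settings-file glob; code_root and the POD name are placeholders.
from pathlib import Path

code_root = '/path/to/MDTF-diagnostics'
matches = sorted(Path(code_root, 'diagnostics', 'example_pod').glob('*settings.*'))
settings_file_path = str(matches[0]) if matches else None
```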
- """ - if var.is_static: - f_name = f"{self.name}.{var.name}.static.nc" - return os.path.join(pod.POD_WK_DIR, f_name) + raise util.PodConfigError("Caught Exception: required setting %s not in pod setting file %s", + value[0]) from exc + + def verify_runtime_reqs(runtime_reqs: dict): + for k, v in runtime_reqs.items(): + if any(v): + pod_env = k + break + pod_pkgs = runtime_reqs[pod_env] + + if "python" not in pod_env: + env_name = '_MDTF_' + pod_env.upper() + '_base' + else: + env_name = '_MDTF_' + pod_env.lower() + '_base' + conda_root = self.pod_env_vars['CONDA_ROOT'] + e = os.path.join(conda_root, 'envs', env_name) + + env_dir = util.resolve_path(e, + env_vars=self.pod_env_vars, + log=self.log) + assert os.path.isdir(env_dir), self.log.error(f'%s not found.', env_dir) + + if pod_env.lower != "python3": + pass + else: + self.log.info(f"Checking {e} for {self.name} package requirements") + if os.path.exists(os.path.join(conda_root, "bin/conda")): + args = [os.path.join(conda_root, "bin/conda"), + 'list', + '-n', + env_name] + elif os.path.exists(self.pod_env_vars['MICROMAMBA_EXE']): + args = [self.pod_env_vars['MICROMAMBA_EXE'], + 'list', + '-n', + env_name] + else: + raise util.PodConfigError('Could not find conda or micromamba executable') + + p1 = subprocess.run(args, + universal_newlines=True, + bufsize=1, + capture_output=True, + text=True, + env=self.pod_env_vars + ) + # verify that pod package names are substrings of at least one package installed + # in the pod environment + output = p1.stdout.splitlines() + for p in pod_pkgs: + has_pkgs = [o for o in output if p.lower() in o.lower()] + if not any(has_pkgs): + self.log.error(f'Package {p} not found in POD environment {pod_env}') + + try: + verify_runtime_reqs(self.pod_settings['runtime_requirements']) + except Exception as exc: + raise util.PodConfigError('POD runtime requirements not defined in specified Conda environment') \ + from exc + + def get_pod_settings(self, pod_settings_dict: util.NameSpace): + self.pod_settings = util.NameSpace.toDict(pod_settings_dict.settings) + + def get_pod_data(self, pod_settings: util.NameSpace): + if hasattr(pod_settings, 'data'): + self.pod_data = util.NameSpace.toDict(pod_settings.data) else: - freq = var.T.frequency.format_local() - f_name = f"{self.name}.{var.name}.{freq}.nc" - return os.path.join(pod.POD_WK_DIR, freq, f_name) + self.log.debug("The data attribute is undefined in '%s' settings file. " + "Using attributes defined separately for each variable", + self.name) + + def get_pod_dims(self, pod_settings: util.NameSpace): + self.pod_dims = util.NameSpace.toDict(pod_settings.dimensions) + def get_pod_vars(self, pod_settings: util.NameSpace): + self.pod_vars = util.NameSpace.toDict(pod_settings.varlist) -class MultiRunPod(PodSetupBaseClass, ABC): - # MultiRunDiagnostic class inherits directly from MultiRunPod class - # and there is no need to define a 'pod' parameter + def query_files_in_time_range(self, startdate, enddate): + pass - def setup_pod(self): - """Update POD with information that only becomes available after - DataManager and Diagnostic have been configured (i.e., only known at - runtime, not from settings.jsonc.) + def append_pod_env_vars(self, pod_input): + self.global_env_vars.update(v for v in pod_input.pod_env_vars) + + def set_entry_point(self): + """Locate the top-level driver script for the POD. + + Raises: :class:`~util.PodRuntimeError` if driver script can't be found. 
""" - for case_name, case_dict in self.cases.items(): - for v in case_dict.iter_children(): - try: - self.setup_var(v, case_dict.attrs.date_range, case_name) - except Exception as exc: - chained_exc = util.chain_exc(exc, f"configuring {v.full_name} in multirun mode.", - util.PodConfigError) - v.deactivate(chained_exc) - continue - # preprocessor will edit case varlist alternates, depending on enabled functions - # self is the Mul - self.preprocessor = self._PreprocessorClass(self) - # self=MulirunDiagnostic instance, and is passed as data_mgr parm to access - # cases - self.preprocessor.edit_request(self) - - for case_name, case_dict in self.cases.items(): - for v in case_dict.iter_children(): - # deactivate failed variables, now that alternates are fully - # specified - if v.last_exception is not None and not v.failed: - v.deactivate(v.last_exception, level=logging.WARNING) - if case_dict.status == core.ObjectStatus.NOTSET and \ - any(v.status == core.ObjectStatus.ACTIVE for v in case_dict.iter_children()): - case_dict.status = core.ObjectStatus.ACTIVE - # set MultirunDiagnostic object status to Active if all case statuses are Active - if self.status == core.ObjectStatus.NOTSET and \ - all(case_dict.status == core.ObjectStatus.ACTIVE for case_name, case_dict in self.cases.items()): - self.status = core.ObjectStatus.ACTIVE + self.driver = os.path.join(self.paths.POD_CODE_DIR, self.pod_settings["driver"]) + if not self.driver: + raise util.PodRuntimeError((f"No driver script found in " + f"{self.paths.POD_CODE_DIR}. Specify 'driver' in settings.jsonc."), + self) + if not os.path.isabs(self.driver): # expand relative path + self.driver = os.path.join(self.paths.POD_CODE_DIR, self.driver) + + self.log.debug("Setting driver script for %s to '%s'.", + self.full_name, self.driver) - def setup_var(self, v, date_range: util.DateRange, case_name: str): - """Update VarlistEntry fields "v" with information that only becomes - available after DataManager and Diagnostic have been configured (ie, - only known at runtime, not from settings.jsonc.) + def set_interpreter(self, pod_settings: util.NameSpace): + """Determine what executable should be used to run the driver script. - Could arguably be moved into VarlistEntry's init, at the cost of - dependency inversion. + .. note:: + Existence of the program on the environment's ``$PATH`` isn't checked + until before the POD runs (see :mod:`src.environment_manager`.) """ - translate = core.VariableTranslator().get_convention(self.convention) - if v.T is not None: - v.change_coord( - 'T', - new_class={ - 'self': varlist_util.VarlistTimeCoordinate, - 'range': util.DateRange, - 'frequency': util.DateFrequency - }, - range=date_range, - calendar=util.NOTSET, - units=util.NOTSET - ) - - v.dest_path = self.variable_dest_path(v, case_name) - try: - trans_v = translate.translate(v) - v.translation = trans_v - # copy preferred gfdl post-processing component during translation - if hasattr(trans_v, "component"): - v.component = trans_v.component - if hasattr(trans_v,"rename_coords"): - v.rename_coords = trans_v.rename_coords - except KeyError as exc: - # can happen in normal operation (eg. precip flux vs. 
rate) - chained_exc = util.PodConfigEvent((f"Deactivating {v.full_name} for multirun case {case_name} due to " - f"variable name translation: {str(exc)}.")) - # store but don't deactivate, because preprocessor.edit_request() - # may supply alternate variables - v.log.store_exception(chained_exc) - except Exception as exc: - chained_exc = util.chain_exc(exc, f"translating name of {v.full_name} for multirun case {case_name}.", - util.PodConfigError) - # store but don't deactivate, because preprocessor.edit_request() - # may supply alternate variables - v.log.store_exception(chained_exc) - - v.stage = varlistentry_util.VarlistEntryStage.INITED - - def variable_dest_path(self, var, case_name): - """Returns the absolute path of the POD's preprocessed, local copy of - the file containing the requested dataset. Files not following this - convention won't be found by the POD. + + if not self.program: + # Find ending of filename to determine the program that should be used + _, driver_ext = os.path.splitext(pod_settings.driver) + # Possible error: Driver file type unrecognized + if driver_ext not in self._interpreters: + raise util.PodRuntimeError((f"Don't know how to call a '{driver_ext}' " + f"file.\nSupported programs: {list(self._interpreters.values())}"), + self + ) + self.program = self._interpreters[driver_ext] + self.log.debug("Set program for %s to '%s'.", + self.full_name, self.program) + + def setup_pod(self, runtime_config: util.NameSpace, + model_paths: util.ModelDataPathManager, + cases: dict): + """Update POD information from settings and runtime configuration files """ - # TODO add option for file(s) with just the var name in a designated directory - # Would involve a regex search for the variable name for a single file, or match - # all files in a specified file list (txt file, json, yaml) + # Parse the POD settings file + pod_input = self.parse_pod_settings_file(runtime_config.CODE_ROOT) + self.get_pod_settings(pod_input) + self.get_pod_vars(pod_input) + self.get_pod_data(pod_input) + self.get_pod_dims(pod_input) + # verify that required settings are specified, + # and that required packages are installed in the target Conda environment + self.verify_pod_settings() + # append user-specified pod_env_vars to PodObject pod_env_vars dict + if 'pod_env_vars' in self.pod_settings: + if len(self.pod_settings['pod_env_vars']) > 0: + for k, v in self.pod_settings['pod_env_vars'].items(): + self.pod_env_vars[k] = v + self.set_interpreter(pod_input.settings) + self.runtime_requirements = pod_input.settings['runtime_requirements'] + pod_convention = self.pod_settings['convention'].lower() + + for case_name, case_dict in runtime_config.case_list.items(): + cases[case_name].read_varlist(self) + # Translate the data if desired and the pod convention does not match the case convention + data_convention = case_dict.convention.lower() + if runtime_config.translate_data and pod_convention != data_convention: + self.log.info(f'Translating POD variables from {pod_convention} to {data_convention}') + else: + data_convention = 'no_translation' + self.log.info(f'POD convention and data convention are both {data_convention}. 
' + f'No data translation will be performed for case {case_name}.') + # A 'noTranslationFieldlist' will be defined for the varlistEntry translation attribute + cases[case_name].translate_varlist(model_paths, + case_name, + data_convention) + + for case_name in cases.keys(): + for v in cases[case_name].iter_children(): + # deactivate failed variables now that alternates are fully specified + if v.last_exception is not None and not v.failed: + util.deactivate(v, v.last_exception, level=logging.WARNING) + if cases[case_name].status == util.ObjectStatus.NOTSET and \ + any(v.status == util.ObjectStatus.ACTIVE for v in cases[case_name].iter_children()): + cases[case_name].status = util.ObjectStatus.ACTIVE + # set MultirunDiagnostic object status to Active if all case statuses are Active + if self.status == util.ObjectStatus.NOTSET and \ + all(case_dict.status == util.ObjectStatus.ACTIVE for case_name, case_dict in cases.items()): + self.status = util.ObjectStatus.ACTIVE + - if var.is_static: - f_name = f"{case_name}.{var.name}.static.nc" - return os.path.join(self.MODEL_WK_DIR[case_name], f_name) - else: - freq = var.T.frequency.format_local() - f_name = f"{case_name}.{var.name}.{freq}.nc" - return os.path.join(self.MODEL_WK_DIR[case_name], freq, f_name) diff --git a/src/preprocessor.py b/src/preprocessor.py index 1fcfb2fef..c3f0f65d8 100644 --- a/src/preprocessor.py +++ b/src/preprocessor.py @@ -6,20 +6,31 @@ import abc import dataclasses import datetime -import functools -from src import util, core, varlistentry_util, diagnostic, xr_parser, units +import importlib +import pandas as pd +from src import util, varlist_util, translation, xr_parser, units import cftime +import intake import numpy as np import xarray as xr +import collections + +# TODO: Make the following lines a unit test +# import sys +# ROOT_DIR = os.path.abspath("../MDTF-diagnostics") +# sys.path.append(ROOT_DIR) +# user_scripts = importlib.import_module("user_scripts") +# from user_scripts import example_pp_script +# test_str = example_pp_script.test_example_script() import logging _log = logging.getLogger(__name__) -def copy_as_alternate(old_v, data_mgr, **kwargs): +def copy_as_alternate(old_v, **kwargs): """Wrapper for :py:func:`dataclasses.replace` that creates a copy of an - existing variable (:class:`~src.diagnostic.VarlistEntry`) *old_v* and sets appropriate + existing variable (:class:`~src.varlist.VarlistEntry`) *old_v* and sets appropriate attributes to designate it as an alternate variable. """ if 'coords' not in kwargs: @@ -29,118 +40,15 @@ def copy_as_alternate(old_v, data_mgr, **kwargs): new_v = dataclasses.replace( old_v, _id=util.MDTF_ID(), # assign distinct ID - stage=varlistentry_util.VarlistEntryStage.INITED, # reset state from old_v - status=core.ObjectStatus.INACTIVE, # new VE meant as an alternate - requirement=varlistentry_util.VarlistEntryRequirement.ALTERNATE, + stage=varlist_util.VarlistEntryStage.INITED, # reset state from old_v + status=util.ObjectStatus.INACTIVE, # new VE meant as an alternate + requirement=varlist_util.VarlistEntryRequirement.ALTERNATE, # plus the specific replacements we want to make: **kwargs ) return new_v -def edit_request_wrapper(wrapped_edit_request_func): - """Decorator implementing the most typical use case for - :meth:`~PreprocessorFunctionBase.edit_request` in preprocessor functions, in - which we look at each variable request in the varlist separately and, - optionally, insert a new alternate :class:`~src.diagnostic.VarlistEntry` - after it, based on that variable. 
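The `copy_as_alternate` helper above is a thin wrapper over `dataclasses.replace`. A toy illustration of the pattern, using a stand-in dataclass rather than the real `VarlistEntry`:

```python
# Copy-with-overrides pattern behind copy_as_alternate; Entry is a stand-in
# for VarlistEntry, and the field values are hypothetical.
import dataclasses

@dataclasses.dataclass
class Entry:
    name: str
    standard_name: str
    units: str

rate = Entry('PRECT', 'precipitation_rate', 'm s-1')
# same entry, but re-requested as a mass flux
flux = dataclasses.replace(rate, standard_name='precipitation_flux',
                           units='kg m-2 s-1')
```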
- - This decorator wraps a function (*wrapped_edit_request_func*) which either - constructs and returns the desired new alternate - :class:`~src.diagnostic.VarlistEntry`, or returns None if no alternates are - to be added for the given variable request. It adds logic for updating the - list of alternates for the pod's varlist. - - .. note:: - - This decorator alters the signature of the decorated function, which is - not in keeping with Python best practices. The expected signature of - *wrapped_edit_request_func* is (:class:`~src.diagnostic.VarlistEntry` *v*, - :class:`~src.diagnostic.Diagnostic` *pod*, *data_mgr*), while the - signature of the returned function is that of - :meth:`PreprocessorFunctionBase.edit_request`. - """ - - @functools.wraps(wrapped_edit_request_func) - def wrapped_edit_request(self, data_mgr, pod): - new_varlist = [] - for v in pod.varlist.iter_contents(): - new_v = wrapped_edit_request_func(self, v, pod, data_mgr) - if new_v is None: - # no change, pass through VE unaltered - new_varlist.append(v) - continue - else: - # insert new_v between v itself and v's old alternate sets - # in varlist query order - new_v.alternates = v.alternates - v.alternates = [[new_v]] - new_v_t_name = (str(new_v.translation) - if getattr(new_v, 'translation', None) is not None - else "(not translated)") - v_t_name = (str(v.translation) if getattr(v, 'translation', None) - is not None else "(not translated)") - pod.log.debug("%s for %s: add translated %s as alternate for %s.", - self.__class__.__name__, v.full_name, new_v_t_name, v_t_name) - new_varlist.append(v) - new_varlist.append(new_v) - pod.varlist = diagnostic.Varlist(contents=new_varlist) - - return wrapped_edit_request - - -def multirun_edit_request_wrapper(multirun_wrapped_edit_request_func): - """Decorator implementing the most typical use case for - :meth:`~PreprocessorFunctionBase.edit_request` in multirun preprocessor functions, in - which we loop through each case, look at each variable request in the varlist separately and, - optionally, insert a new alternate :class:`~src.diagnostic.VarlistEntry` - after it, based on that variable. - - This decorator wraps a function (*multirun_wrapped_edit_request_func*) which either - constructs and returns the desired new alternate - :class:`~src.diagnostic.VarlistEntry`, or returns None if no alternates are - to be added for the given variable request. It adds logic for updating the - list of alternates for each cases varlist. - - .. note:: - - This decorator alters the signature of the decorated function, which is - not in keeping with Python best practices. The expected signature of - *wrapped_edit_request_func* is (:class:`~src.diagnostic.VarlistEntry` *v*, - :class:`~src.diagnostic.MultirunDiagnostic` *data_mgr*), while the - signature of the returned function is that of - :meth:`PreprocessorFunctionBase.edit_request`. 
- """ - - @functools.wraps(multirun_wrapped_edit_request_func) - def wrapped_edit_request(self, data_mgr): - for case_name, case_d in data_mgr.cases.items(): - new_varlist = [] - for v in case_d.varlist.iter_contents(): - new_v = multirun_wrapped_edit_request_func(self, v, data_mgr) - if new_v is None: - # no change, pass through VE unaltered - new_varlist.append(v) - continue - else: - # insert new_v between v itself and v's old alternate sets - # in varlist query order - new_v.alternates = v.alternates - v.alternates = [[new_v]] - new_v_t_name = (str(new_v.translation) - if getattr(new_v, 'translation', None) is not None - else "(not translated)") - v_t_name = (str(v.translation) if getattr(v, 'translation', None) - is not None else "(not translated)") - case_d.log.debug("%s for %s: add translated %s as alternate for %s.", - self.__class__.__name__, v.full_name, new_v_t_name, v_t_name) - new_varlist.append(v) - new_varlist.append(new_v) - case_d.varlist = diagnostic.MultirunVarlist(contents=new_varlist) - - return wrapped_edit_request - - class PreprocessorFunctionBase(abc.ABC): """Abstract interface for implementing a specific preprocessing functionality. As described in :doc:`fmwk_preprocess`, each preprocessing operation is @@ -158,41 +66,35 @@ class PreprocessorFunctionBase(abc.ABC): - :meth:`process`, which actually implements the data format conversion. """ - def __init__(self, data_mgr, *args): + def __init__(self, *args): """Called during Preprocessor's init.""" pass - def edit_request(self, data_mgr, *args): + def edit_request(self, v: varlist_util.VarlistEntry, **kwargs): """Edit the data requested in *pod*'s :class:`~src.diagnostic.Varlist` queue, based on the transformations the functionality can perform (in :meth:`process`). If the function can transform data in format *X* to format *Y* and the POD requests *X*, this method should insert an alternate variable request (:class:`~src.diagnostic.VarlistEntry`) for *Y*. - - Args: - data_mgr: Parent data source instance, used read-only to obtain - initialization information not available from individual PODs. """ - pass + return v @abc.abstractmethod - def process(self, var, dataset, *args): + def execute(self, var: varlist_util.VarlistEntry, + xr_dataset, + **kwargs): """Apply the format conversion implemented in this PreprocessorFunction to the input dataset *dataset*, according to the request made in *var*. Args: - var (:class:`~src.diagnostic.VarlistEntry`): POD varlist entry - instance describing POD's data request, which is the desired end - result of the conversion implemented by this method. - dataset: `xarray.Dataset - `__ - instance. + var: dictionary of variable information + xr_dataset: xarray dataset with information from ESM intake catalog Returns: Modified *dataset*. """ - return dataset + pass class CropDateRangeFunction(PreprocessorFunctionBase): @@ -215,12 +117,7 @@ def cast_to_cftime(dt: datetime.datetime, calendar): ('tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec')) return cftime.datetime(*tt, calendar=calendar) - def edit_request(self, data_mgr, pod): - """No-op for this PreprocessorFunction, since no alternate data is needed. - """ - pass - - def process(self, var, ds, *args): + def execute(self, var, ds, **kwargs): """Parse quantities related to the calendar for time-dependent data and truncate the date range of model dataset *ds*. @@ -228,7 +125,7 @@ def process(self, var, ds, *args): user's input before we knew the calendar being used by the model. 
The workaround here to cast those values into `cftime.datetime `__ - objects so they can be compared with the model data's time axis. + objects so that they can be compared with the model data's time axis. """ tv_name = var.name_in_model t_coord = ds.cf.dim_axes(tv_name).get('T', None) @@ -320,16 +217,16 @@ def process(self, var, ds, *args): var.log.info(("Requested dates for %s coincide with range of dataset " "'%s -- %s'; left unmodified."), var.full_name, - nt_values[0].strftime('%Y-%m-%d'), - nt_values[-1].strftime('%Y-%m-%d'), + nt_values[0].strftime('%Y-%m-%d:%H-%M-%S'), + nt_values[-1].strftime('%Y-%m-%d:%H-%M-%S'), ) else: var.log.info("Cropped date range of %s from '%s -- %s' to '%s -- %s'.", var.full_name, - t_start.strftime('%Y-%m-%d'), - t_end.strftime('%Y-%m-%d'), - nt_values[0].strftime('%Y-%m-%d'), - nt_values[-1].strftime('%Y-%m-%d'), + t_start.strftime('%Y-%m-%d:%H-%M-%S'), + t_end.strftime('%Y-%m-%d:%H-%M-%S'), + nt_values[0].strftime('%Y-%m-%d:%H-%M-%S'), + nt_values[-1].strftime('%Y-%m-%d:%H-%M-%S'), tags=util.ObjectLogTag.NC_HISTORY ) return ds @@ -349,7 +246,7 @@ class PrecipRateToFluxFunction(PreprocessorFunctionBase): """ # Incorrect but matches convention for this conversion. _liquid_water_density = units.Units('1000.0 kg m-3') - # list of regcognized standard_names for which transformation is applicable + # list of recognized standard_names for which transformation is applicable # NOTE: not exhaustive _std_name_tuples = [ # flux in CF, rate is not @@ -362,8 +259,7 @@ class PrecipRateToFluxFunction(PreprocessorFunctionBase): _rate_d = {tup[0]: tup[1] for tup in _std_name_tuples} _flux_d = {tup[1]: tup[0] for tup in _std_name_tuples} - @edit_request_wrapper - def edit_request(self, v, pod, data_mgr): + def edit_request(self, v: varlist_util.VarlistEntry, **kwargs): """Edit *pod*\'s Varlist prior to query. 
If the :class:`~src.diagnostic.VarlistEntry` *v* has a ``standard_name``
     in the recognized list, insert an alternate VarlistEntry whose translation
@@ -375,38 +271,46 @@ def edit_request(self, v, pod, data_mgr):
         """
         std_name = getattr(v, 'standard_name', "")
         if std_name not in self._rate_d and std_name not in self._flux_d:
-            # logic not applicable to this VE; do nothing
-            return None
+            # logic not applicable to this VE; do nothing and return the VarlistEntry
+            # for the next function to run edit_request on
+            return v
         # construct dummy var to translate (rather than modifying std_name & units)
         # on v's translation) because v may not have a translation
         if std_name in self._rate_d:
             # requested rate, so add alternate for flux
             v_to_translate = copy_as_alternate(
-                v, data_mgr,
+                v,
                 standard_name=self._rate_d[std_name],
                 units=units.to_cfunits(v.units) * self._liquid_water_density
             )
         elif std_name in self._flux_d:
             # requested flux, so add alternate for rate
             v_to_translate = copy_as_alternate(
-                v, data_mgr,
+                v,
                 standard_name=self._flux_d[std_name],
                 units=units.to_cfunits(v.units) / self._liquid_water_density
             )

-        translate = core.VariableTranslator()
+        translate = translation.VariableTranslator()
+        to_convention = kwargs.get('convention', None)
+        assert to_convention, 'convention not passed in **kwargs of PrecipRateToFluxFunction.edit_request'
         try:
-            new_tv = translate.translate(data_mgr.attrs.convention, v_to_translate)
+            new_tv = translate.translate(to_convention, v_to_translate)
         except KeyError as exc:
-            pod.log.debug(('%s edit_request on %s: caught %r when trying to '
-                           'translate \'%s\'; varlist unaltered.'), self.__class__.__name__,
-                          v.full_name, exc, v_to_translate.standard_name)
+            v.log.debug(('%s edit_request on %s: caught %r when trying to '
+                         'translate \'%s\'; varlist unaltered.'), self.__class__.__name__,
+                        v.full_name, exc, v_to_translate.standard_name)
             return None
-        new_v = copy_as_alternate(v, data_mgr)
+        new_v = copy_as_alternate(v)
         new_v.translation = new_tv
         return new_v

-    def process(self, var, ds, *args):
+    def execute(self, var, ds, **kwargs):
         """Convert units of dependent variable *ds* between precip rate and
         precip flux, as specified by the desired units given in *var*. If the
         ``standard_name`` of *ds* is not in the recognized list, return it
@@ -452,12 +356,7 @@ class ConvertUnitsFunction(PreprocessorFunctionBase):
     :doc:`src.units`.
     """

-    def edit_request(self, data_mgr, pod):
-        """No-op for this PreprocessorFunction, since no alternate data is needed.
-        """
-        pass
-
-    def process(self, var, ds, *args):
+    def execute(self, var, ds, **kwargs):
         """Convert units on the dependent variable and coordinates of var from
         what's specified in the dataset attributes to what's given in the
         VarlistEntry *var*. Units attributes are updated on the
@@ -504,12 +403,7 @@ class RenameVariablesFunction(PreprocessorFunctionBase):
     """Renames dependent variables and coordinates to what's expected by the POD.
     """

-    def edit_request(self, data_mgr, pod):
-        """No-op for this PreprocessorFunction, since no alternate data is needed.
-        """
-        pass
-
-    def process(self, var, ds, *args):
+    def execute(self, var, ds, **kwargs):
         """Change the names of the DataArrays with Dataset *ds* to the names
         specified by the :class:`~src.diagnostic.VarlistEntry` *var*.
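A worked instance of the rate/flux relationship that `PrecipRateToFluxFunction` above relies on: multiplying a rate in m s-1 by the nominal liquid water density (1000 kg m-3, the convention value used in `_liquid_water_density`) gives a mass flux in kg m-2 s-1, and dividing inverts it. The rate value here is hypothetical.

```python
# rate <-> flux conversion by a factor of the liquid water density.
RHO_W = 1000.0               # kg m-3, convention value
rate_m_per_s = 2.0e-8        # hypothetical precip rate, m s-1
flux = rate_m_per_s * RHO_W  # 2.0e-5 kg m-2 s-1
assert abs(flux / RHO_W - rate_m_per_s) < 1e-20
```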
Names of the dependent variable and all dimension coordinates and scalar @@ -555,16 +449,26 @@ def process(self, var, ds, *args): class AssociatedVariablesFunction(PreprocessorFunctionBase): """Preprocessor class to copy associated variables to wkdir""" - def process(self, var, ds, *args): + def execute(self, var, ds, **kwargs): + casename = "" + pod_wkdir = "" + query_associated_files = False + for k, v in kwargs.items(): + if 'work_dir' in k: + pod_wkdir = v + elif 'case_name' in k: + casename = v + elif 'query_associated_files' in k: + query_associated_files = v + + if not query_associated_files or not var.associated_files: + return ds try: - # get string labels from variable object - pod_wkdir = var._parent.POD_WK_DIR - casename = var._parent._parent.name # iterate over active associated files and get current local paths associated_files = list( - var.iter_associated_files_keys(status=core.ObjectStatus.ACTIVE) + var.iter_associated_files_keys(status=util.ObjectStatus.ACTIVE) ) associated_files = [d_key.local_data for d_key in associated_files] @@ -593,6 +497,7 @@ def process(self, var, ds, *args): var.log.debug( f"Error encountered with preprocessing associated files: {exc}" ) + pass return ds @@ -608,8 +513,7 @@ class ExtractLevelFunction(PreprocessorFunctionBase): :meth:`process` raises a KeyError. """ - @edit_request_wrapper - def edit_request(self, v, pod, data_mgr): + def edit_request(self, v: varlist_util.VarlistEntry, **kwargs): """Edit the *pod*'s :class:`~src.diagnostic.Varlist` prior to data query. If given a :class:`~src.diagnostic.VarlistEntry` *v* has a ``scalar_coordinate`` for the Z axis (i.e., is requesting data on a @@ -617,16 +521,18 @@ def edit_request(self, v, pod, data_mgr): removed (i.e., requesting a full 3D variable) to be used as an alternate variable for *v*. - The signature of this method is altered by the :func:`edit_request_wrapper` - decorator. """ + for key, val in kwargs.items(): + if 'convention' in key: + data_convention = val + if not v.translation: # hit this if VE not defined for this model naming convention; - # do nothing for this v - return None + # do nothing for this v and return for next pp function edit_request + return v elif v.translation.get_scalar('Z') is None: # hit this if VE didn't request Z level extraction; do nothing - return None + return v tv = v.translation # abbreviate if len(tv.scalar_coords) == 0: @@ -639,18 +545,18 @@ def edit_request(self, v, pod, data_mgr): if v.use_exact_name: new_tv_name = v.name else: - new_tv_name = core.VariableTranslator().from_CF_name( - data_mgr.attrs.convention, v.standard_name, new_ax_set + new_tv_name = translation.VariableTranslator().from_CF_name( + data_convention, v.standard_name, new_ax_set, v.realm ) new_tv = tv.remove_scalar( tv.scalar_coords[0].axis, name=new_tv_name ) - new_v = copy_as_alternate(v, data_mgr) + new_v = copy_as_alternate(v) new_v.translation = new_tv return new_v - def process(self, var, ds, *args): + def execute(self, var, ds, **kwargs): """Determine if level extraction is needed (if *var* has a scalar Z coordinate and Dataset *ds* is 3D). If so, return the appropriate 2D slice of *ds*, otherwise pass through *ds* unaltered. @@ -732,12 +638,53 @@ class ApplyScaleAndOffsetFunction(PreprocessorFunctionBase): that are known to be incorrect. """ - def edit_request(self, data_mgr, pod): - """No-op for this PreprocessorFunction, since no alternate data is needed. 
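The 2D slice `ExtractLevelFunction` produces when the requested pressure level exists on the Z coordinate amounts to a labeled selection; a minimal sketch with synthetic data, assuming a `plev` coordinate in hPa:

```python
# Select a single pressure level from a 3D-style variable; synthetic data.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {'ua': (('plev', 'lat'), np.zeros((3, 4)))},
    coords={'plev': [850.0, 500.0, 250.0], 'lat': np.linspace(-60, 60, 4)},
)
ua500 = ds['ua'].sel(plev=500.0)  # raises KeyError if the level is absent
```

This mirrors the behavior documented above: no vertical interpolation is attempted, so a missing level is an error rather than something to reconstruct.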
+    def edit_request(self, v: varlist_util.VarlistEntry, **kwargs):
+        """Edit the *pod*'s :class:`~src.VarlistEntry.Varlist` prior to data query.
+        If a given :class:`~src.VarlistEntry` *v* has a
+        ``scalar_coordinate`` for the Z axis (i.e., is requesting data on a
+        pressure level), return a copy of *v* with that ``scalar_coordinate``
+        removed (i.e., requesting a full 3D variable) to be used as an alternate
+        variable for *v*.
         """
-        pass
+        data_convention = kwargs.get('convention', None)
+        if not v.translation:
+            # hit this if VE not defined for this model naming convention;
+            # do nothing for this v
+            return None
+        elif v.translation.get_scalar('Z') is None:
+            # hit this if VE didn't request Z level extraction; do nothing
+            return None

-    def process(self, var, ds, *args):
+        tv = v.translation  # abbreviate
+        if len(tv.scalar_coords) == 0:
+            raise AssertionError  # should never get here
+        elif len(tv.scalar_coords) > 1:
+            raise NotImplementedError()
+        # wraps method in data_model; makes a modified copy of translated var
+        # restore name to that of 4D data (eg. 'u500' -> 'ua')
+        new_ax_set = set(v.axes_set) | {'Z'}
+        if v.use_exact_name:
+            new_tv_name = v.name
+        else:
+            new_tv_name = translation.VariableTranslator().from_CF_name(
+                data_convention, v.standard_name, v.realm, new_ax_set
+            )
+        new_tv = tv.remove_scalar(
+            tv.scalar_coords[0].axis,
+            name=new_tv_name
+        )
+        new_v = copy_as_alternate(v)
+        new_v.translation = new_tv
+        return new_v
+
+    def execute(self, var, ds, **kwargs):
         """Retrieve the ``scale_factor`` and ``add_offset`` attributes from the
         dependent variable of *ds*, and if set, apply the linear transformation
         to the dependent variable. If both are set, the scaling is applied first
@@ -768,7 +715,21 @@ def process(self, var, ds, *args):
         return ds

-# ==================================================
+class UserDefinedPreprocessorFunction(PreprocessorFunctionBase):
+    """Class to hold user-defined preprocessor functions"""
+    user_defined_script: str
+
+    def __init__(self, pp_script: str):
+        """Called during Preprocessor's init."""
+        self.user_defined_script = pp_script
+
+    def edit_request(self, v, **kwargs):
+        """Dummy implementation of edit_request to meet abstract base class requirements
+        """
+        return v
+
+    def execute(self, var, ds, **kwargs):
+        pass


 class MDTFPreprocessorBase(metaclass=util.MDTFABCMeta):
@@ -785,26 +746,29 @@ class MDTFPreprocessorBase(metaclass=util.MDTFABCMeta):
     which will be the input to that POD.
     """
     _XarrayParserClass = xr_parser.DefaultDatasetParser
-
-    def __init__(self, data_mgr, pod):
-        config = core.ConfigManager()
-        self.overwrite_ds = config.get('overwrite_file_metadata', False)
-
-        self.WK_DIR = data_mgr.MODEL_WK_DIR
-        self.convention = data_mgr.attrs.convention
-        self.pod_convention = pod.convention
-
-        if getattr(pod, 'nc_largefile', False):
+    WORK_DIR: dict
+    """List of PreprocessorFunctions to be executed on a per-file basis as the
+    multi-file Dataset is being loaded, rather than afterwards as part of the
+    :meth:`process`. Note that such functions will not be able to rely on the
+    metadata cleaning done by xr_parser.
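The linear transform that `ApplyScaleAndOffsetFunction.execute` applies (scale first, then offset, then unset both attributes) can be sketched directly in xarray; attribute values here are hypothetical:

```python
# Apply scale_factor/add_offset to a DataArray and remove the attributes.
import numpy as np
import xarray as xr

da = xr.DataArray(np.array([1.0, 2.0]),
                  attrs={'scale_factor': 0.01, 'add_offset': 273.15})
scale = da.attrs.pop('scale_factor', 1.0)
offset = da.attrs.pop('add_offset', 0.0)
da = (da * scale + offset).assign_attrs(da.attrs)
```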
+ """ + file_preproc_functions = util.abstract_attribute() + output_to_ncl: bool = False + nc_format: str + user_pp_scripts: list + + def __init__(self, + model_paths: util.ModelDataPathManager, + config: util.NameSpace): + self.WORK_DIR = model_paths.MODEL_WORK_DIR + # initialize PreprocessorFunctionBase objects + self.file_preproc_functions = [] + # initialize xarray parser + self.parser = self._XarrayParserClass(config) + if config.large_file: self.nc_format = "NETCDF4_CLASSIC" else: self.nc_format = "NETCDF4" - # HACK only used for _FillValue workaround in clean_output_encoding - self.output_to_ncl = ('ncl' in pod.runtime_requirements) - - # initialize xarray parser - self.parser = self._XarrayParserClass(data_mgr, pod) - # initialize PreprocessorFunctionBase objects - self.functions = [cls_(data_mgr, pod) for cls_ in self._functions] @property def _functions(self): @@ -815,37 +779,139 @@ def _functions(self): tuple of classes (inheriting from :class:`PreprocessorFunctionBase`) listing the preprocessing functions to be called, in order. """ - config = core.ConfigManager() - if config.get('disable_preprocessor', False): - # omit unit conversion functions; following two functions necessary - # in all cases to obtain correct output - return ( - CropDateRangeFunction, RenameVariablesFunction - ) - else: - # normal operation: run all functions - return ( - CropDateRangeFunction, - PrecipRateToFluxFunction, ConvertUnitsFunction, - ExtractLevelFunction, RenameVariablesFunction, - AssociatedVariablesFunction + # normal operation: run all functions + return [ + CropDateRangeFunction, AssociatedVariablesFunction, + PrecipRateToFluxFunction, ConvertUnitsFunction, + ExtractLevelFunction, RenameVariablesFunction, + ] + + def check_group_daterange(self, group_df: pd.DataFrame, log=_log) -> pd.DataFrame: + """Sort the files found for each experiment by date, verify that + the date ranges contained in the files are contiguous in time and that + the date range of the files spans the query date range. + + Args: + group_df (Pandas Dataframe): + log: log file + """ + date_col = "date_range" + try: + # method throws ValueError if ranges aren't contiguous + dates_df = group_df.loc[:, ['start_time', 'end_time']] + date_range_vals = [] + for idx, x in enumerate(group_df.values): + st = dates_df.at[idx, 'start_time'] + en = dates_df.at[idx, 'end_time'] + date_range_vals.append(util.DateRange(st, en)) + + group_df = group_df.assign(date_range=date_range_vals) + sorted_df = group_df.sort_values(by=date_col) + + files_date_range = util.DateRange.from_contiguous_span( + *(sorted_df[date_col].to_list()) ) + # throws AssertionError if we don't span the query range + # TODO: define self.attrs.DateRange from runtime config info + # assert files_date_range.contains(self.attrs.date_range) + return sorted_df + except ValueError: + log.error("Non-contiguous or malformed date range in files:", sorted_df["path"].values) + except AssertionError: + log.debug(("Eliminating expt_key since date range of files (%s) doesn't " + "span query range (%s)."), files_date_range, self.attrs.date_range) + except Exception as exc: + log.warning(f"Caught exception {repr(exc)}") + # hit an exception; return empty DataFrame to signify failure + return pd.DataFrame(columns=group_df.columns) + + def query_catalog(self, + case_dict: dict, + data_catalog: str, + *args) -> dict: + """Apply the format conversion implemented in this PreprocessorFunction + to the input dataset *dataset*, according to the request made in *var*. 
- def edit_request(self, data_mgr, pod): + Args: + case_dict: dictionary of case names + data_catalog: path to data catalog header file + + Returns: + Dictionary of xarray datasets with catalog information for each case + """ + + # open the csv file using information provided by the catalog definition file + cat = intake.open_esm_datastore(data_catalog) + # create filter lists for POD variables + cat_dict = {} + # Instantiate dataframe to hold catalog subset information + cols = list(cat.df.columns.values) + if 'date_range' not in [c.lower() for c in cols]: + cols.append('date_range') + for case_name, case_d in case_dict.items(): + # path_regex = re.compile(r'(?i)(?`__ - to load a single netCDF file. - """ - if len(path_list) != 1: - raise ValueError(f"{var.full_name}: Expected one file, got {path_list}.") - var.log.debug("Loaded '%s'.", path_list[0], tags=util.ObjectLogTag.IN_FILE) - return xr.open_dataset( - path_list[0], - **self.open_dataset_kwargs - ) - - @abc.abstractmethod - def read_dataset(self, var): - """Abstract method to load downloaded model data into an xarray Dataset, - to be implemented by child classes. + def rename_dataset_keys(self, ds: dict, case_list: dict) -> collections.OrderedDict: + """Rename dataset keys output by ESM intake catalog query to case names`""" - Args: - var (:class:`~src.diagnostic.VarlistEntry`): POD varlist entry - instance describing POD's data request, which is the desired end - result of the conversion implemented by this method. + def rename_key(old_dict: dict, new_dict: collections.OrderedDict, old_key, new_key): + """Credit: https://stackoverflow.com/questions/16475384/rename-a-dictionary-key""" + new_dict[new_key] = old_dict[old_key] - Returns: - xarray Dataset containing the model data requested by *var*. - """ - pass # return ds + new_dict = collections.OrderedDict() + case_names = [c for c in case_list.keys()] + for old_key, case_d in ds.items(): + (path, filename) = os.path.split(case_d.attrs['intake_esm_attrs:path']) + rename_key(ds, new_dict, old_key, [c for c in case_names if c in filename][0]) + return new_dict def clean_nc_var_encoding(self, var, name, ds_obj): """Clean up the ``attrs`` and ``encoding`` dicts of *ds_obj* @@ -928,10 +980,10 @@ def clean_nc_var_encoding(self, var, name, ds_obj): attrs_to_delete = set([]) # mark attrs with sentinel value for deletion - for k, v in attrs.items(): - if v == xr_parser.ATTR_NOT_FOUND: - var.log.debug("Caught unset attribute '%s' of '%s'.", k, name) - attrs_to_delete.add(k) + for key, val in attrs.items(): + if val == xr_parser.ATTR_NOT_FOUND: + var.log.debug("Caught unset attribute '%s' of '%s'.", key, name) + attrs_to_delete.add(key) # clean up _FillValue old_fillvalue = encoding.get('_FillValue', np.nan) if name != var.translation.name \ @@ -959,7 +1011,9 @@ def clean_nc_var_encoding(self, var, name, ds_obj): if k in attrs: del attrs[k] - def clean_output_attrs(self, var, ds): + def clean_output_attrs(self, + var: varlist_util.VarlistEntry, + ds: xr.Dataset): """Calls :meth:`clean_nc_var_encoding` on all sets of attributes in the Dataset *ds*. 
""" @@ -1028,7 +1082,7 @@ def write_dataset(self, var, ds): # TODO: remove any netCDF Variables that were present in the input file # (and ds) but not needed for PODs' data request os.makedirs(os.path.dirname(var.dest_path), exist_ok=True) - var.log.debug("Writing '%s'.", var.dest_path, tags=util.ObjectLogTag.OUT_FILE) + # var.log.info("Writing '%s'.", var.dest_path, tags=util.ObjectLogTag.OUT_FILE) if var.is_static: unlimited_dims = [] else: @@ -1041,22 +1095,42 @@ def write_dataset(self, var, ds): ) ds.close() - def load_ds(self, var): - """Top-level method to load dataset and parse metadata; spun out so that - child classes can modify it. Calls the :meth:`read_dataset` method + def write_ds(self, case_list: dict, + catalog_subset: collections.OrderedDict, + pod_reqs: dict): + """Top-level method to write out processed dataset *ds*; spun out so + that child classes can modify it. Calls the :meth:`write_dataset` method implemented by the child class. """ - try: - ds = self.read_dataset(var) - except Exception as exc: - raise util.chain_exc(exc, (f"loading " - f"dataset for {var.full_name}."), util.DataPreprocessEvent) - var.log.debug("Read %d mb for %s.", ds.nbytes / (1024 * 1024), var.full_name) + for k, v in pod_reqs.items(): + if 'ncl' in v: + self.output_to_ncl = True + for case_name, ds in catalog_subset.items(): + for var in case_list[case_name].varlist.iter_vars(): + # var.log.info("Writing %d mb to %s", ds[var.name].variable.nbytes / (1024 * 1024), var.dest_path) + try: + ds = self.clean_output_attrs(var, ds) + ds = self.log_history_attr(var, ds) + except Exception as exc: + raise util.chain_exc(exc, (f"cleaning attributes to " + f"write data for {var.full_name}."), util.DataPreprocessEvent) + try: + self.write_dataset(var, ds) + except Exception as exc: + raise util.chain_exc(exc, f"writing data for {var.full_name}.", + util.DataPreprocessEvent) + + # del ds # shouldn't be necessary + + def parse_ds(self, + var: varlist_util.VarlistEntry, + ds: xr.Dataset) -> xr.Dataset: + """Top-level method to parse metadata; spun out so that child classes can modify it. + """ try: ds = self.parser.parse(var, ds) except Exception as exc: - raise util.chain_exc(exc, (f"parsing file " - f"metadata for {var.full_name}."), util.DataPreprocessEvent) + raise util.chain_exc(exc, f"parsing dataset metadata", util.DataPreprocessEvent) return ds def process_ds(self, var, ds): @@ -1076,570 +1150,144 @@ def process_ds(self, var, ds): ) return ds - def write_ds(self, var, ds): - """Top-level method to write out processed dataset *ds*; spun out so - that child classes can modify it. Calls the :meth:`write_dataset` method - implemented by the child class. 
+    def process(self,
+                case_list: dict,
+                config: util.NameSpace,
+                model_work_dir: dict) -> collections.OrderedDict:
         """Top-level wrapper method for doing all preprocessing of data files
-        associated with the POD variable *var*.
+        associated with each case in the case_list dictionary
         """
-        path_str = util.abbreviate_path(var.dest_path, self.WK_DIR, '$WK_DIR')
-        var.log.info("Writing %d mb to %s", ds.nbytes / (1024 * 1024), path_str)
+        # get the initial model data subset from the ESM-intake catalog
+        cat_subset = self.query_catalog(case_list, config.DATA_CATALOG)
+
+        for case_name, case_xr_dataset in cat_subset.items():
+            for v in case_list[case_name].varlist.iter_vars():
+                self.edit_request(v, convention=cat_subset[case_name].convention)
+                cat_subset[case_name] = self.parse_ds(v, case_xr_dataset)
+                self.execute_pp_functions(v,
+                                          cat_subset[case_name],
+                                          work_dir=model_work_dir[case_name],
+                                          case_name=case_name)
+
+        return cat_subset
+
+    def write_pp_catalog(self,
+                         input_catalog_ds: xr.Dataset,
+                         config: util.PodPathManager,
+                         log: logging.Logger):
+        """Write a new data catalog for the preprocessed data
+        to the POD output directory
+        """
+        cat_file_name = "MDTF_postprocessed_data"
+        pp_cat_assets = util.define_pp_catalog_assets(config, cat_file_name)
+        file_list = util.get_file_list(config.OUTPUT_DIR)
+        # fill in catalog information from pp file name
+        entries = list(map(util.mdtf_pp_parser, file_list))
+        # append columns defined in assets
+        columns = [att['column_name'] for att in pp_cat_assets['attributes']]
+        for col in columns:
+            for e in entries:
+                if col not in e.keys():
+                    e[col] = ""
+        # copy information from input catalog to pp catalog entries
+        global_attrs = ['convention', 'realm']
+        for e in entries:
+            ds_match = input_catalog_ds[e['dataset_name']]
+            for att in global_attrs:
+                e[att] = ds_match.attrs.get(att, '')
+            ds_var = ds_match.data_vars.get(e['variable_id'])
+            for key, val in ds_var.attrs.items():
+                if key in columns:
+                    e[key] = val
+
+        # create a Pandas dataframe from the catalog entries
+        cat_df = pd.DataFrame(entries)
+        # validate the catalog
         try:
-            ds = self.clean_output_attrs(var, ds)
-            ds = self.log_history_attr(var, ds)
+            log.debug('Validating pp data catalog')
+            validated_cat = intake.open_esm_datastore(
+                obj=dict(
+                    df=cat_df,
+                    esmcat=pp_cat_assets
+                )
+            )
         except Exception as exc:
-            raise util.chain_exc(exc, (f"cleaning attributes to "
-                                       f"write data for {var.full_name}."), util.DataPreprocessEvent)
+            log.error(f'Unable to validate esm intake catalog for pp data: {exc}')
         try:
-            self.write_dataset(var, ds)
+            log.debug(f'Writing pp data catalog {cat_file_name} csv and json files to {config.OUTPUT_DIR}')
+            validated_cat.serialize(cat_file_name,
+                                    directory=config.OUTPUT_DIR,
+                                    catalog_type="file")
         except Exception as exc:
-            raise util.chain_exc(exc, f"writing data for {var.full_name}.",
-                                 util.DataPreprocessEvent)
-        del ds  # shouldn't be necessary
-
-    def process(self, var):
-        """Top-level wrapper method for doing all preprocessing of data files
-        associated with the POD variable *var*.
-        """
-        var.log.info("Preprocessing %s.", var)
-        ds = self.load_ds(var)
-        ds = self.process_ds(var, ds)
-        self.write_ds(var, ds)
-        var.log.debug("Successful preprocessor exit on %s.", var)
-
-
-class SingleFilePreprocessor(MDTFPreprocessorBase):
-    """A Preprocessor class for preprocessing model data that's provided as a
-    single netCDF file per variable, for example the POD's sample model data.
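A condensed view of the validate-then-serialize flow in `write_pp_catalog` above, reusing the same intake-esm calls; `cat_df` and `pp_cat_assets` are assumed to have been built as shown, and the output directory is a placeholder:

```python
# Build an in-memory intake-esm datastore from a DataFrame plus asset dict,
# then write the catalog's csv/json pair to disk.
import intake

validated_cat = intake.open_esm_datastore(obj=dict(df=cat_df, esmcat=pp_cat_assets))
validated_cat.serialize('MDTF_postprocessed_data',
                        directory='/path/to/OUTPUT_DIR',  # placeholder
                        catalog_type='file')
```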
- - Implemented separately in the event that we (or the user) doesn't want to - bring in dask as an external dependency. - """ - - def read_dataset(self, var): - """Read a single file Dataset specified by the ``local_data`` attribute of - *var*, using :meth:`read_one_file`. - """ - return self.read_one_file(var, var.local_data) + log.error(f'Unable to save esm intake catalog for pp data: {exc}') class NullPreprocessor(MDTFPreprocessorBase): - """A class that skips preprocessing and just symlinks files from the input dir to the wkdir + """A class that skips preprocessing and just symlinks files from the input dir to the work dir """ - def __init__(self, data_mgr, pod): - config = core.ConfigManager() - self.overwrite_ds = config.get('overwrite_file_metadata', False) - - self.WK_DIR = data_mgr.MODEL_WK_DIR - self.convention = data_mgr.attrs.convention - self.pod_convention = pod.convention - - if getattr(pod, 'nc_largefile', False): - self.nc_format = "NETCDF4_CLASSIC" - else: - self.nc_format = "NETCDF4" - # HACK only used for _FillValue workaround in clean_output_encoding - self.output_to_ncl = ('ncl' in pod.runtime_requirements) - - # initialize xarray parser - self.parser = self._XarrayParserClass(data_mgr, pod) - # dummy attribute--no pp functions to perform - self.functions = [] + def __init__(self, + model_paths: util.ModelDataPathManager, + config: util.NameSpace): + # initialize PreprocessorFunctionBase objects + super().__init__(model_paths, config) + self.file_preproc_functions = [] - def edit_request(self, data_mgr, pod): + def edit_request(self, v, **kwargs): """Dummy implementation of edit_request to meet abstract base class requirements """ - pass + return v - def read_dataset(self, var): - """Dummy implementation of read_dataset to meet abstract base class requirements - """ - pass - - def process(self, var): + def process(self, case_list: dict, + config: util.NameSpace, + model_work_dir: dict) -> collections.OrderedDict: """Top-level wrapper method for doing all preprocessing of data files - associated with the POD variable *var*. - """ - var.log.debug("Skipping preprocessing for %s.", var) - - -class DaskMultiFilePreprocessor(MDTFPreprocessorBase): - """A Preprocessor class that uses xarray's dask support to - preprocess model data provided as one or multiple netcdf files per - variable, using xarray `open_mfdataset() - `__. - """ - _file_preproc_functions = util.abstract_attribute() - """List of PreprocessorFunctions to be executed on a per-file basis as the - multi-file Dataset is being loaded, rather than afterwards as part of the - :meth:`process`. Note that such functions will not be able to rely on the - metadata cleaning done by xr_parser.""" - - def __init__(self, data_mgr, pod): - super(DaskMultiFilePreprocessor, self).__init__(data_mgr, pod) - # initialize PreprocessorFunctionBase objects - self.file_preproc_functions = \ - [cls_(data_mgr, pod) for cls_ in self._file_preproc_functions] - - def edit_request(self, data_mgr, pod): - """Edit *pod*\'s data request, based on the child class's functionality. If - the child class has a function that can transform data in format *X* to - format *Y* and the POD requests *X*, this method should insert a - backup/fallback request for *Y*. 
+ associated with each case in the caselist dictionary """ - for func in self.file_preproc_functions: - func.edit_request(data_mgr, pod) - super(DaskMultiFilePreprocessor, self).edit_request(data_mgr, pod) + # get the initial model data subset from the ESM-intake catalog + cat_subset = self.query_catalog(case_list, config.DATA_CATALOG) + for case_name, case_xr_dataset in cat_subset.items(): + for v in case_list[case_name].varlist.iter_vars(): + self.edit_request(v, convention=cat_subset[case_name].convention) + cat_subset[case_name] = self.parse_ds(v, case_xr_dataset) - def read_dataset(self, var): - """Open multi-file Dataset specified by the ``local_data`` attribute of - *var*, wrapping xarray `open_mfdataset() - `__. - """ + return cat_subset - def _file_preproc(ds): - for f in self.file_preproc_functions: - ds = f.process(var, ds) - return ds - assert var.local_data - if len(var.local_data) == 1: - ds = self.read_one_file(var, var.local_data) - return _file_preproc(ds) - else: - assert not var.is_static # just to be safe - var.log.debug("Loaded multi-file dataset of %d files:\n%s", - len(var.local_data), - '\n'.join(4 * ' ' + f"'{f}'" for f in var.local_data), - tags=util.ObjectLogTag.IN_FILE - ) - return xr.open_mfdataset( - var.local_data, - combine="by_coords", - # only time-dependent variables and coords are concat'ed: - data_vars="minimal", coords="minimal", - # all non-concat'ed vars must be the same; global attrs can differ - # from file to file; values in ds are taken from first file - compat="equals", - join="exact", # raise ValueError if non-time dims conflict - parallel=True, # use dask - preprocess=_file_preproc, - **self.open_dataset_kwargs - ) - - -# ------------------------------------------------- - -class SampleDataPreprocessor(SingleFilePreprocessor): - """Implementation class for :class:`MDTFPreprocessorBase` intended for use - on sample model data distributed with the package. Assumes all data is in - one netCDF file. - """ - # Need to include all functions; ExtractLevelFunction needed for - # NCAR-CAM5.timeslice for Travis CI - pass - - -class DefaultPreprocessor(DaskMultiFilePreprocessor): - """Implementation class for :class:`MDTFPreprocessorBase` for the general - use case. Includes all implemented functionality and handles multi-file data. - """ - _file_preproc_functions = [] - - -class MultirunDaskMultiFilePreprocessor(DaskMultiFilePreprocessor): +class DaskMultiFilePreprocessor(MDTFPreprocessorBase): """A Preprocessor class that uses xarray's dask support to preprocess model data provided as one or multiple netcdf files per variable, using xarray `open_mfdataset() `__. """ - _file_preproc_functions = [] + module_root: str = "" - def __init__(self, data_mgr): + def __init__(self, + model_paths: util.ModelDataPathManager, + config: util.NameSpace): # initialize PreprocessorFunctionBase objects - self.file_preproc_functions = \ - [cls_(data_mgr) for cls_ in self._file_preproc_functions] - - def edit_request(self, data_mgr, *args): - """Edit *pod*\'s data request, based on the child class's functionality. If - the child class has a function that can transform data in format *X* to - format *Y* and the POD requests *X*, this method should insert a - backup/fallback request for *Y*. 
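A hedged sketch of how a user post-processing script under `user_scripts/` could be loaded at runtime, in the spirit of the `module_root` attribute above and the commented-out `importlib` TODO in this file's imports; `example_pp_script` is a hypothetical module name:

```python
# Dynamically import a user script package and call an optional entry point.
import importlib

module = importlib.import_module('user_scripts.example_pp_script')
result = getattr(module, 'test_example_script', lambda: None)()
```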
- """ - for func in self.file_preproc_functions: - func.edit_request(data_mgr, *args) - - -class MultirunDefaultPreprocessor(MultirunDaskMultiFilePreprocessor): - """Implementation class for :class:`MDTFPreprocessorBase` intended for use - on sample model data distributed with the package. Assumes all data for each - multirun case is in one netCDF file. - """ - _XarrayParserClass = xr_parser.MultirunDefaultDatasetParser - - def __init__(self, data_mgr): - super(MultirunDefaultPreprocessor, self).__init__(data_mgr) - config = core.ConfigManager() - self.overwrite_ds = config.get('overwrite_file_metadata', False) - - self.WK_DIR = data_mgr.MODEL_WK_DIR - self.convention = data_mgr.convention - self.pod_convention = self.convention - - if not data_mgr.nc_largefile: - self.nc_format = "NETCDF4_CLASSIC" - else: - self.nc_format = "NETCDF4" - # HACK only used for _FillValue workaround in clean_output_encoding - self.output_to_ncl = ('ncl' in data_mgr.runtime_requirements) - - # initialize xarray parser - self.parser = self._XarrayParserClass(data_mgr) - # initialize PreprocessorFunctionBase objects - self.functions = [cls_(data_mgr) for cls_ in self._functions] - - @property - def _functions(self): - """Determine which PreprocessorFunctions are applicable to the current - package run, defaulting to all of them. - - Returns: - tuple of classes (inheriting from :class:`PreprocessorFunctionBase`) - listing the preprocessing functions to be called, in order. - """ - config = core.ConfigManager() - if config.get('disable_preprocessor', False): - # omit unit conversion functions; following two functions necessary - # in all cases to obtain correct output - return ( - MultirunCropDateRangeFunction, MultirunRenameVariablesFunction - ) - else: - # normal operation: run all functions - return ( - MultirunCropDateRangeFunction, - MultirunPrecipRateToFluxFunction, MultirunConvertUnitsFunction, - MultirunExtractLevelFunction, MultirunRenameVariablesFunction, - MultirunAssociatedVariablesFunction - ) - - # Same as MDTFPreprocessorBase: edit_request, but only need data_mgr arg - def edit_request(self, data_mgr, *args): - """Top-level method to edit each case's data request in the data_mgr, based on the child - class's functionality. Calls the :meth:`~PreprocessorFunctionBase.edit_request` - method on all included PreprocessorFunctions. - """ - for func in self.functions: - func.edit_request(data_mgr, *args) - - def process(self, var, casename: str): - """Top-level wrapper method for doing all preprocessing of data files - associated with the POD variable *var*. - """ - var.log.info("Preprocessing %s.", var) - ds = self.load_ds(var) - ds = self.process_ds(var, ds, casename) - self.write_ds(var, ds) - var.log.debug("Successful preprocessor exit on %s.", var) - - def process_ds(self, var, ds, casename: str): - """Top-level method to call the :meth:`~PreprocessorFunctionBase.process` - of each included PreprocessorFunction on the Dataset *ds*. Spun out into - its own method so that child classes can modify it. 
- """ - for f in self.functions: + super().__init__(model_paths, config) + self.file_preproc_functions = [f for f in self._functions] + if any([s for s in config.user_pp_scripts]): + self.add_user_pp_scripts(config) + self.module_root = os.path.join(config.CODE_ROOT, "user_scripts") + + def add_user_pp_scripts(self, runtime_config: util.NameSpace): + self.user_pp_scripts = [s for s in runtime_config.user_pp_scripts] + for s in self.user_pp_scripts: try: - var.log.debug("Calling %s on %s.", f.__class__.__name__, - var.full_name) - ds = f.process(var, ds, casename) - except Exception as exc: - raise util.chain_exc(exc, (f'Preprocessing on {var.full_name} ' - f'failed at {f.__class__.__name__}.'), - util.DataPreprocessEvent - ) - return ds - - def write_ds(self, var, ds): - """Top-level method to write out processed dataset *ds*; spun out so - that child classes can modify it. Calls the :meth:`write_dataset` method - implemented by the child class. - """ - for casename, wkdir in self.WK_DIR.items(): - if wkdir in var.dest_path: - break - - path_str = util.abbreviate_path(var.dest_path, wkdir, '$WK_DIR') - var.log.info("Writing %d mb to %s", ds.nbytes / (1024 * 1024), path_str) - try: - ds = self.clean_output_attrs(var, ds) - ds = self.log_history_attr(var, ds) - except Exception as exc: - raise util.chain_exc(exc, (f"cleaning attributes to " - f"write data for {var.full_name}."), util.DataPreprocessEvent) - try: - self.write_dataset(var, ds) - except Exception as exc: - raise util.chain_exc(exc, f"writing data for {var.full_name}.", - util.DataPreprocessEvent) - del ds # shouldn't be necessary - - -class MultirunCropDateRangeFunction(CropDateRangeFunction): - - # Same as CropDateRangeFunction: edit_request, but only need data_mgr arg - def edit_request(self, data_mgr, *args): - """No-op for this PreprocessorFunction, since no alternate data is needed. - """ - pass - - -class MultirunRenameVariablesFunction(RenameVariablesFunction): - """Renames dependent variables and coordinates to what's expected by the POD. - """ - - # Same as RenameVariablesFunction: edit_request, but only need data_mgr arg - def edit_request(self, data_mgr, *args): - """No-op for this PreprocessorFunction, since no alternate data is needed. - """ - pass - - -class MultirunPrecipRateToFluxFunction(PrecipRateToFluxFunction): - """A PreprocessorFunction which converts the dependent variable's units, for - the specific case of precipitation. Flux and precip rate differ by a factor - of the density of water, so can't be handled by the udunits2 implementation - provided by :class:`~src.units.Units`. Instead, they're handled here as a - special case. The general case of unit conversion is handled by - :class:`ConvertUnitsFunction`. - - CF ``standard_names`` recognized for the conversion are ``precipitation_flux``, - ``convective_precipitation_flux``, ``large_scale_precipitation_flux``, and - likewise for ``*_rate``. - """ - - @multirun_edit_request_wrapper - def edit_request(self, v, data_mgr, *args): - """Edit *case*\'s Varlist prior to query. If the - :class:`~src.MultirunDiagnostic.VarlistEntry` *v* has a ``standard_name`` in the - recognized list, insert an alternate VarlistEntry whose translation - requests the complementary type of variable (i.e., if given rate, add an - entry for flux; if given flux, add an entry for rate.) - - The signature of this method is altered by the :func:`multirun_edit_request_wrapper` - decorator. 
- """ - std_name = getattr(v, 'standard_name', "") - if std_name not in self._rate_d and std_name not in self._flux_d: - # logic not applicable to this VE; do nothing - return None - # construct dummy var to translate (rather than modifying std_name & units) - # on v's translation) because v may not have a translation - if std_name in self._rate_d: - # requested rate, so add alternate for flux - v_to_translate = copy_as_alternate( - v, data_mgr, - standard_name=self._rate_d[std_name], - units=units.to_cfunits(v.units) * self._liquid_water_density - ) - elif std_name in self._flux_d: - # requested flux, so add alternate for rate - v_to_translate = copy_as_alternate( - v, data_mgr, - standard_name=self._flux_d[std_name], - units=units.to_cfunits(v.units) / self._liquid_water_density - ) - - translate = core.VariableTranslator() - try: - new_tv = translate.translate(data_mgr.attrs.convention, v_to_translate) - except KeyError as exc: - self.log.debug(('%s edit_request on %s: caught %r when trying to ' - 'translate \'%s\'; varlist unaltered.'), self.__class__.__name__, - v.full_name, exc, v_to_translate.standard_name) - return None - new_v = copy_as_alternate(v, data_mgr) - new_v.translation = new_tv - return new_v - - -class MultirunConvertUnitsFunction(ConvertUnitsFunction): - """Convert units on the dependent variable of var, as well as its - (non-time) dimension coordinate axes, from what's specified in the dataset - attributes to what's requested in the :class:`~src.diagnostic.VarlistEntry`. - - Unit conversion is implemented by - `cfunits `__; see - :doc:`src.units`. - """ - - # Same as ConvertUnitsFunction: edit_request, but only need data_mgr arg - def edit_request(self, data_mgr, *args): - """No-op for this PreprocessorFunction, since no alternate data is needed. - """ - pass - - -class MultirunExtractLevelFunction(ExtractLevelFunction): - """Extract a requested pressure level from a Dataset containing a 3D variable. - - .. note:: - - Unit conversion on the vertical coordinate is implemented, but - parametric vertical coordinates and coordinate interpolation are not. - If a pressure level is requested that isn't present in the data, - :meth:`process` raises a KeyError. - This class is identical to parent ExtractLevelFunction except that pod data is obtained - from data_mgr parameter with information from the MultirunDiagnostic object - rather than the pod parameter - """ - - @multirun_edit_request_wrapper - def edit_request(self, v, data_mgr, *args): - """Edit the *pod*'s :class:`~src.diagnostic.Varlist` prior to data query. - If given a :class:`~src.MultirunDiagnostic.VarlistEntry` *v* has a - ``scalar_coordinate`` for the Z axis (i.e., is requesting data on a - pressure level), return a copy of *v* with that ``scalar_coordinate`` - removed (i.e., requesting a full 3D variable) to be used as an alternate - variable for *v*. - - The signature of this method is altered by the :func:`multirun_edit_request_wrapper` - decorator. - """ - if not v.translation: - # hit this if VE not defined for this model naming convention; - # do nothing for this v - return None - elif v.translation.get_scalar('Z') is None: - # hit this if VE didn't request Z level extraction; do nothing - return None - - tv = v.translation # abbreviate - if len(tv.scalar_coords) == 0: - raise AssertionError # should never get here - elif len(tv.scalar_coords) > 1: - raise NotImplementedError() - # wraps method in data_model; makes a modified copy of translated var - # restore name to that of 4D data (eg. 
'u500' -> 'ua') - new_ax_set = set(v.axes_set).add('Z') - if v.use_exact_name: - new_tv_name = v.name - else: - new_tv_name = core.VariableTranslator().from_CF_name( - data_mgr.convention, v.standard_name, new_ax_set - ) - - new_tv = tv.remove_scalar( - tv.scalar_coords[0].axis, - name=new_tv_name - ) - new_v = copy_as_alternate(v, data_mgr) - new_v.translation = new_tv - return new_v - - class MultirunApplyScaleAndOffsetFunction(ApplyScaleAndOffsetFunction): - """If the Dataset has ``scale_factor`` and ``add_offset`` attributes set, - apply the corresponding constant linear transformation to the dependent - variable's values and unset these attributes. See `CF convention documentation - `__ - on the ``scale_factor`` and ``add_offset`` attributes. - - .. note:: - - By default this function is not applied. It's only provided to implement - workarounds for running the package on data with metadata (i.e., units) - that are known to be incorrect. - """ - - # Same as ApplyScaleAndOffsetFunction: edit_request, but only need data_mgr arg - def edit_request(self, data_mgr, *args): - """No-op for this PreprocessorFunction, since no alternate data is needed. - Overrides ApplyScaleAndOffsetFunction: edit_request, and does not have pod parameter - """ - pass - - -class MultirunAssociatedVariablesFunction(AssociatedVariablesFunction): - """Preprocessor class to copy associated variables to wkdir""" - - def process(self, var, ds, casename: str): - - try: - # get string labels from variable object - pod_wkdir = var._parent.POD_WK_DIR + os.path.exists(s) + except util.MDTFFileExistsError: + self.log.error(f"User-defined post-processing file {s} not found") - # iterate over active associated files and get current local paths - associated_files = list( - var.iter_associated_files_keys(status=core.ObjectStatus.ACTIVE) - ) - associated_files = [d_key.local_data for d_key in associated_files] - - # flatten a list of nested lists - associated_files = [ - d_key for sublist in associated_files for d_key in sublist - ] - # construct destination paths in wkdir - associated_files_dst = [ - f"{pod_wkdir}/assoc/{casename}.{os.path.basename(x)}" - for x in associated_files - ] - - # create `assoc` directory and copy files - os.makedirs(f"{pod_wkdir}/assoc/", exist_ok=True) - _ = [ - shutil.copy(*x) - for x in list(zip(associated_files, associated_files_dst)) - ] - - # Replace object attribute with CSV list of final paths in wkdir - var.associated_files = str(",").join(associated_files_dst) - - except Exception as exc: - var.log.debug( - f"Error encountered with preprocessing associated files: {exc}" - ) - - return ds - - -class MultirunNullPreprocessor(MultirunDefaultPreprocessor): - """A class that skips preprocessing and just symlinks files from the input dir to the wkdir +def init_preprocessor(model_paths: util.ModelDataPathManager, + config: util.NameSpace, + run_pp: bool = True): + """Initialize the data preprocessor class using runtime configuration specs """ - - _XarrayParserClass = xr_parser.MultirunDefaultDatasetParser - - def __init__(self, data_mgr): - config = core.ConfigManager() - self.overwrite_ds = config.get('overwrite_file_metadata', False) - - self.WK_DIR = data_mgr.MODEL_WK_DIR - self.convention = data_mgr.convention - self.pod_convention = self.convention - - if not data_mgr.nc_largefile: - self.nc_format = "NETCDF4_CLASSIC" - else: - self.nc_format = "NETCDF4" - # HACK only used for _FillValue workaround in clean_output_encoding - self.output_to_ncl = ('ncl' in data_mgr.runtime_requirements) - - # 
initialize xarray parser - self.parser = self._XarrayParserClass(data_mgr) - # Empty set since there's nothing to preprocess - self.functions = [] - - def edit_request(self, data_mgr, *args): - """Dummy implementation of edit_request to meet abstract base class requirements - """ - pass - - def read_dataset(self, var): - """Dummy implementation of read_dataset to meet abstract base class requirements - """ - pass - - def process(self, var): - """Top-level wrapper method for doing all preprocessing of data files - associated with the POD variable *var*. - """ - var.log.debug("Skipping preprocessing for %s.", var) + if not run_pp: + return NullPreprocessor(model_paths, config) + else: + return DaskMultiFilePreprocessor(model_paths, config) diff --git a/src/query_fetch_preprocess.py b/src/query_fetch_preprocess.py deleted file mode 100644 index 25889b27c..000000000 --- a/src/query_fetch_preprocess.py +++ /dev/null @@ -1,870 +0,0 @@ -import abc -import collections -import glob -from abc import ABC -import intake_esm -import os -import pandas as pd -import signal -from src import core, util, varlistentry_util, diagnostic - - -FileGlobTuple = collections.namedtuple( - 'FileGlobTuple', 'name glob attrs' -) -FileGlobTuple.__doc__ = """ - Class representing one file glob pattern. *attrs* is a dict containing the - data catalog values that will be associated with all files found using *glob*. - *name* is used for logging only. -""" - - -class AbstractQueryMixin(abc.ABC): - @abc.abstractmethod - def query_dataset(self, var): - """Sets *data* attribute on var or raises an exception.""" - pass - - def setup_query(self): - """Called once, before the iterative :meth:`~DataSourceBase.request_data` process starts. - Use to, eg, initialize database or remote filesystem connections. - """ - pass - - def pre_query_hook(self, vars): - """Called before querying the presence of a new batch of variables.""" - pass - - def set_experiment(self): - """Called after querying the presence of a new batch of variables, to - filter or otherwise ensure that the returned DataKeys for *all* - variables comes from the same experimental run of the model, by setting - the *status* attribute of those DataKeys to ACTIVE.""" - pass - - def post_query_hook(self, vars): - """Called after select_experiment(), after each query of a new batch of - variables.""" - pass - - def tear_down_query(self): - """Called once, after the iterative :meth:`~DataSourceBase.request_data` process ends. - Use to, eg, close database or remote filesystem connections. - """ - pass - - -class AbstractFetchMixin(abc.ABC): - @abc.abstractmethod - def fetch_dataset(self, var, data_key): - """Fetches data corresponding to *data_key*. Populates its *local_data* - attribute with a list of identifiers for successfully fetched data - (paths to locally downloaded copies of data). - """ - pass - - def setup_fetch(self): - """Called once, before the iterative :meth:`~DataSourceBase.request_data` process starts. - Use to, eg, initialize database or remote filesystem connections. - """ - pass - - def pre_fetch_hook(self, vars): - """Called before fetching each batch of query results.""" - pass - - def post_fetch_hook(self, vars): - """Called after fetching each batch of query results.""" - pass - - def tear_down_fetch(self): - """Called once, after the iterative :meth:`~DataSourceBase.request_data` process ends. - Use to, eg, close database or remote filesystem connections. 
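The init_preprocessor factory above reduces to a small pattern: choose a no-op implementation or the real one from a runtime flag. A hedged sketch with stand-in classes (not the package's NullPreprocessor/DaskMultiFilePreprocessor signatures):

class _NullPP:
    """Stand-in for NullPreprocessor: passes data through untouched."""
    def process(self, var):
        return var

class _DaskPP:
    """Stand-in for DaskMultiFilePreprocessor."""
    def process(self, var):
        return f"processed({var})"

def make_preprocessor(run_pp: bool):
    # mirrors init_preprocessor: run_pp=False short-circuits to the no-op class
    return _DaskPP() if run_pp else _NullPP()

assert make_preprocessor(run_pp=False).process("tas") == "tas"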
- """ - pass - - -class AbstractDataSource(AbstractQueryMixin, AbstractFetchMixin, - metaclass=util.MDTFABCMeta): - @abc.abstractmethod - def __init__(self, case_dict, parent): - # sets signature of __init__ method - pass - - def pre_query_and_fetch_hook(self): - """Called once, before the iterative :meth:`~DataSourceBase.request_data` process starts. - Use to, eg, initialize database or remote filesystem connections. - """ - # call methods if we're using mixins; if not, child classes will override - if hasattr(self, 'setup_query'): - self.setup_query() - if hasattr(self, 'setup_fetch'): - self.setup_fetch() - - def post_query_and_fetch_hook(self): - """Called once, after the iterative :meth:`~DataSourceBase.request_data` process ends. - Use to, eg, close database or remote filesystem connections. - """ - # call methods if we're using mixins; if not, child classes will override - if hasattr(self, 'tear_down_query'): - self.tear_down_query() - if hasattr(self, 'tear_down_fetch'): - - self.tear_down_fetch() - - -class OnTheFlyFilesystemQueryMixin(metaclass=util.MDTFABCMeta): - """Mixin that creates an intake_esm.esm_datastore catalog by using a regex - (\_FileRegexClass) to query the existence of data files on a remote - filesystem. - - For the purposes of this class, all data attributes are inferred only from - filea nd directory naming conventions: the contents of the files are not - examined (i.e., the data files are not read from) until they are fetched to - a local filesystem. - - .. note:: - At time of writing, the `filename parsing - `__ - functionality included in `intake - `__ is too limited to - correctly parse our use cases, which is why we use the - :class:`~src.util.RegexPattern` class instead. - """ - # root directory to begin crawling at: - CATALOG_DIR = util.abstract_attribute() - # regex to use to generate catalog entries from relative paths: - _FileRegexClass = util.abstract_attribute() - _asset_file_format = "netcdf" - - @property - def df(self): - assert (hasattr(self, 'catalog') and hasattr(self.catalog, 'df')) - return self.catalog.df - - @property - def remote_data_col(self): - """Name of the column in the catalog containing the path to the remote - data file. - """ - return self._FileRegexClass._pattern.input_field - - def _dummy_esmcol_spec(self): - """Dummy specification dict that enables us to use intake_esm's - machinery. The catalog is temporary and not retained after the code - finishes running. - """ - data_cols = list(self._FileRegexClass._pattern.fields) - data_cols.remove(self.remote_data_col) - # no aggregations, since for now we want to manually insert logic for - # file fetching (& error handling etc.) before we load an xarray Dataset. - return { - "esmcat_version": "0.1.0", - "id": "MDTF_" + self.__class__.__name__, - "description": "", - "attributes": [ - {"column_name":c, "vocabulary": ""} for c in data_cols - ], - "assets": { - "column_name": self.remote_data_col, - "format": self._asset_file_format - }, - "last_updated": "2020-12-06", - 'aggregation_control': { - 'variable_column_name': 'variable', 'groupby_attrs': [] - } - } - - @abc.abstractmethod - def generate_catalog(self): - """Method (to be implemented by child classes) which returns the data - catalog as a Pandas DataFrame. One of the columns of the DataFrame must - have the name returned by :meth:`remote_data_col` and contain paths to - the files. - """ - pass - - def setup_query(self): - """Generate an intake_esm catalog of files found in CATALOG_DIR. 
- Attributes of files listed in the catalog (columns of the DataFrame) are - taken from the match groups (fields) of the class's \_FileRegexClass. - """ - self.log.info('Starting data file search at %s:', self.CATALOG_DIR) - # df: DataFrame, catalog content that would otherwise be in a csv file - # esmcol_data: Optional[Dict[str, Any]] = None, ESM collection spec information - # progressbar: bool = True, if True, prints progress bar to standard error when loading info into xarray dataset - # sep: str = '.', delimiter to use when constructing key for a query - # **kwargs: Any - - obj = {'df': self.generate_catalog(), 'esmcat': self._dummy_esmcol_spec()} - - self.catalog = intake_esm.core.esm_datastore( - obj, - progressbar=False, sep='|' - ) - - -class OnTheFlyDirectoryHierarchyQueryMixin( - OnTheFlyFilesystemQueryMixin, metaclass=util.MDTFABCMeta -): - """Mixin that creates an intake_esm.esm_datastore catalog on-the-fly by - crawling a directory hierarchy and populating catalog entry attributes - by running a regex (\_FileRegexClass) against the paths of files in the - directory hierarchy. - """ - # optional regex to speed up directory crawl to skip non-matching directories - # without examining all files; default below is to not skip any directories - _DirectoryRegex = util.RegexPattern(".*") - - def iter_files(self): - """Generator that yields instances of \_FileRegexClass generated from - relative paths of files in CATALOG_DIR. Only paths that match the regex - in \_FileRegexClass are returned. - """ - # in case CATALOG_DIR is subset of CASE_ROOT_DIR - path_offset = len(os.path.join(self.attrs.CASE_ROOT_DIR, "")) - for root, _, files in os.walk(self.CATALOG_DIR): - try: - self._DirectoryRegex.match(root[path_offset:]) - except util.RegexParseError: - continue - if not self._DirectoryRegex.is_matched: - continue - for f in files: - if f.startswith('.'): - continue - try: - path = os.path.join(root, f) - yield self._FileRegexClass.from_string(path, path_offset) - except util.RegexSuppressedError: - # decided to silently ignore this file - continue - except Exception: - self.log.info(" Couldn't parse path '%s'.", path[path_offset:]) - continue - - def generate_catalog(self): - """Crawl the directory hierarchy via :meth:`iter_files` and return the - set of found files as rows in a Pandas DataFrame. - """ - # DataFrame constructor must be passed list, not just an iterable - df = pd.DataFrame(list(self.iter_files()), dtype='object') - if len(df) == 0: - self.log.critical('Directory crawl did not find any files.') - raise AssertionError('Directory crawl did not find any files.') - else: - self.log.info("Directory crawl found %d files.", len(df)) - return df - - -class OnTheFlyGlobQueryMixin( - OnTheFlyFilesystemQueryMixin, metaclass=util.MDTFABCMeta -): - """Mixin that creates an intake_esm.esm_datastore catalog on-the-fly by - searching for files with (python's implementation of) the shell - :py:mod:`glob` syntax. - - We still invoke \_FileRegexClass to parse the paths, but the expected use - case is that this will be the trivial regex (matching everything, with no - labeled match groups), since the file selection logic is being handled by - the globs. If you know your data is stored according to some relevant - structure, you should use :class:`OnTheFlyDirectoryHierarchyQueryMixin` - instead. - """ - @abc.abstractmethod - def iter_globs(self): - """Iterator returning :class:`FileGlobTuple` instances. The generated - catalog contains the union of the files found by each of the globs. 
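The implementation of this glob-union catalog follows below; here is a self-contained sketch of the same pattern, with a hypothetical directory layout and attrs. (The sketch uses pd.concat; the original code below uses the since-deprecated DataFrame.append to the same effect.)

import collections
import glob
import pandas as pd

FileGlobTuple = collections.namedtuple("FileGlobTuple", "name glob attrs")

globs = [
    FileGlobTuple("monthly tas", "catalog_dir/**/tas_*.nc", {"variable": "tas"}),
    FileGlobTuple("monthly pr", "catalog_dir/**/pr_*.nc", {"variable": "pr"}),
]
frames = []
for gt in globs:
    df = pd.DataFrame({"path": list(glob.iglob(gt.glob, recursive=True))})
    for k, v in gt.attrs.items():  # tag every row found by this glob
        df[k] = v
    frames.append(df)
catalog = pd.concat(frames, ignore_index=True)  # union of all globs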
- """ - pass - - def iter_files(self, path_glob): - """Generator that yields instances of \_FileRegexClass generated from - relative paths of files in CATALOG_DIR. Only paths that match the regex - in \_FileRegexClass are returned. - """ - path_offset = len(os.path.join(self.attrs.CASE_ROOT_DIR, "")) - if not os.path.isabs(path_glob): - path_glob = os.path.join(self.CATALOG_DIR, path_glob) - for path in glob.iglob(path_glob, recursive=True): - yield self._FileRegexClass.from_string(path, path_offset) - - def generate_catalog(self): - """Build the catalog from the files returned from the set of globs - provided by :meth:`rel_path_globs`. - """ - catalog_df = pd.DataFrame(dtype='object') - for glob_tuple in self.iter_globs(): - # DataFrame constructor must be passed list, not just an iterable - df = pd.DataFrame( - list(self.iter_files(glob_tuple.glob)), - dtype='object' - ) - if len(df) == 0: - self.log.critical("No files found for '%s' with pattern '%s'.", - glob_tuple.name, glob_tuple.glob) - raise AssertionError((f"No files found for '{glob_tuple.name}' " - f"with pattern '{glob_tuple.glob}'.")) - else: - self.log.info("%d files found for '%s'.", len(df), glob_tuple.name) - - # add catalog attributes specific to this set of files - for k, v in glob_tuple.attrs.items(): - df[k] = v - catalog_df = catalog_df.append(df) - # need to fix repeated indices from .append()ing - return catalog_df.reset_index(drop=True) - - -class LocalFetchMixin(AbstractFetchMixin): - """Mixin implementing data fetch for files on a locally mounted filesystem. - No data is transferred; we assume that xarray can open the paths directly. - Paths are unaltered and set as variable's *local_data*. - """ - def fetch_dataset(self, var, d_key): - paths = d_key.remote_data() - if isinstance(paths, pd.Series): - paths = paths.to_list() - if not util.is_iterable(paths): - paths = (paths, ) - for path in paths: - if not os.path.exists(path): - raise util.DataFetchEvent((f"Fetch {d_key} ({var.full_name}): " - f"File not found at {path}."), var) - else: - self.log.debug("Fetch %s: found %s.", d_key, path) - d_key.local_data = paths - - -PodVarTuple = collections.namedtuple('PodVarTuple', ['pod', 'var']) -MAX_DATASOURCE_ITERS = 5 - - -class DataSourceQFPMixin(core.MDTFObjectBase, util.CaseLoggerMixin, - AbstractDataSource, ABC, metaclass=util.MDTFABCMeta): - """Mixin implementing data query, fetch, and preprocessing-related attributes and - methods - """ - @property - def full_name(self): - return f"<#{self._id}:{self.name}>" - - @property - def _children(self): - """Iterable of child objects (:class:`~diagnostic.Diagnostic`\s) - associated with this object. - """ - return self.pods.values() - - def iter_vars(self, active=None, pod_active=None): - """Iterator over all :class:`~diagnostic.VarlistEntry`\s (grandchildren) - associated with this case. Returns :class:`PodVarTuple`\s (namedtuples) - of the :class:`~diagnostic.Diagnostic` and :class:`~diagnostic.VarlistEntry` - objects corresponding to the POD and its variable, respectively. - - Args: - active: bool or None, default None. Selects subset of - :class:`~diagnostic.VarlistEntry`\s which are returned in the - namedtuples: - - - active = True: only iterate over currently active VarlistEntries. - - active = False: only iterate over inactive VarlistEntries - (VarlistEntries which have either failed or are currently - unused alternate variables). - - active = None: iterate over both active and inactive - VarlistEntries. - - pod_active: bool or None, default None. 
Same as *active*, but - filtering the PODs that are selected. - """ - - def _get_kwargs(active_): - if active_ is None: - return {'status': None} - if active_: - return {'status': core.ObjectStatus.ACTIVE} - else: - return {'status_neq': core.ObjectStatus.ACTIVE} - - pod_kwargs = _get_kwargs(pod_active) - var_kwargs = _get_kwargs(active) - for p in self.iter_children(**pod_kwargs): - for v in p.iter_children(**var_kwargs): - yield PodVarTuple(pod=p, var=v) - - def iter_vars_only(self, active=None): - """Convenience wrapper for :meth:`iter_vars` that returns only the - :class:`~diagnostic.VarlistEntry` objects (grandchildren) from all PODs - in this DataSource. - """ - yield from (pv.var for pv in self.iter_vars(active=active, pod_active=None)) - - # DATA QUERY/FETCH/PREPROCESS ------------------------------------- - - def data_key(self, value, expt_key=None, status=None): - """Constructor for an instance of :class:`DataKeyBase` that's used by - this DataSource. - """ - if status is None: - status = core.ObjectStatus.NOTSET - return self._DataKeyClass( - _parent=self, value=value, - expt_key=expt_key, status=status - ) - - def is_fetch_necessary(self, d_key, var=None): - if len(d_key.local_data) > 0: - self.log.debug("Already successfully fetched %s.", d_key) - return False - if d_key.failed: - self.log.debug("%s failed; not retrying.", d_key) - return False - return True - - def child_deactivation_handler(self, child, child_exc): - """When a DataKey (*child*) has been deactivated during query or fetch, - log a message on all VarlistEntries using it, and deactivate any - VarlistEntries with no remaining viable DataKeys. - """ - if isinstance(child, diagnostic.Diagnostic): - # DataSource has 2 types of children: PODs and DataKeys - # only need to handle the latter here - return - - for v in self.iter_vars_only(active=None): - v.deactivate_data_key(child, child_exc) - - def query_data(self): - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - vars_to_query = [ - v for v in self.iter_vars_only(active=True) \ - if v.stage < varlistentry_util.VarlistEntryStage.QUERIED - ] - if not vars_to_query: - break # exit: queried everything or nothing active - - self.log.debug('Query batch: [%s].', - ', '.join(v.full_name for v in vars_to_query)) - self.pre_query_hook(vars_to_query) - for v in vars_to_query: - try: - self.log.info("Querying %s.", v.translation) - self.query_dataset(v) # sets v.data - if not v.data: - raise util.DataQueryEvent("No data found.", v) - v.stage = varlistentry_util.VarlistEntryStage.QUERIED - except util.DataQueryEvent as exc: - v.deactivate(exc) - continue - except Exception as exc: - chained_exc = util.chain_exc(exc, - f"querying {v.translation} for {v.full_name}.", - util.DataQueryEvent) - v.deactivate(chained_exc) - continue - self.post_query_hook(vars_to_query) - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in query_data() for {self.full_name}." 
- ) - - def select_data(self): - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - # query alternates for any vars that failed since last time - self.query_data() - update = False - # this loop differs from the others in that logic isn't/can't be - # done on a per-variable basis, so we just try to execute - # set_experiment() successfully - try: - self.set_experiment() - break # successful exit - except util.DataExperimentEvent: - # couldn't set consistent experiment attributes. Try again b/c - # we've deactivated problematic pods/vars. - update = True - except Exception as exc: - self.log.exception("%s while setting experiment: %r", - util.exc_descriptor(exc), exc) - raise exc - else: - # only hit this if we don't break - raise util.DataQueryEvent( - f"Too many iterations in select_data() for {self.full_name}." - ) - - def fetch_data(self): - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - self.select_data() - update = False - vars_to_fetch = [ - v for v in self.iter_vars_only(active=True) \ - if v.stage < varlistentry_util.VarlistEntryStage.FETCHED - ] - if not vars_to_fetch: - break # exit: fetched everything or nothing active - - self.log.debug('Fetch batch: [%s].', - ', '.join(v.full_name for v in vars_to_fetch)) - self.pre_fetch_hook(vars_to_fetch) - for v in vars_to_fetch: - try: - v.log.info("Fetching %s.", v) - # fetch on a per-DataKey basis - for d_key in v.iter_data_keys(status=core.ObjectStatus.ACTIVE): - try: - if not self.is_fetch_necessary(d_key): - continue - v.log.debug("Fetching %s.", d_key) - self.fetch_dataset(v, d_key) - except Exception as exc: - update = True - d_key.deactivate(exc) - break # no point continuing - - # check if var received everything - for d_key in v.iter_data_keys(status=core.ObjectStatus.ACTIVE): - if not d_key.local_data: - raise util.DataFetchEvent("Fetch failed.", d_key) - v.stage = varlistentry_util.VarlistEntryStage.FETCHED - except Exception as exc: - update = True - chained_exc = util.chain_exc(exc, - f"fetching data for {v.full_name}.", - util.DataFetchEvent) - v.deactivate(chained_exc) - continue - self.post_fetch_hook(vars_to_fetch) - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in fetch_data() for {self.full_name}." - ) - - def preprocess_data(self): - """Hook to run the preprocessing function on all variables. 
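All of the request-handling loops in this mixin (query_data and select_data above, fetch_data, and preprocess_data below) share one idiom: a logical while-loop capped at MAX_DATASOURCE_ITERS, where Python's for/else clause raises only if the loop never hit break. A minimal sketch of that pattern:

MAX_DATASOURCE_ITERS = 5

def drain(work):
    # "really a while-loop, but limit # of iterations to be safe"
    for _ in range(MAX_DATASOURCE_ITERS):
        if not work:
            break  # exit: processed everything
        work.pop()  # stand-in for one query/fetch/preprocess pass
    else:
        # only reached if the loop exhausted its iterations without break
        raise RuntimeError("Too many iterations in drain().")

drain([1, 2, 3])  # completes quietly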
- """ - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - # fetch alternates for any vars that failed since last time - self.fetch_data() - update = False - vars_to_process = [ - pv for pv in self.iter_vars(active=True) - if pv.var.stage < varlistentry_util.VarlistEntryStage.PREPROCESSED - ] - if not vars_to_process: - break # exit: processed everything or nothing active - - for pod in self.iter_children(status=core.ObjectStatus.ACTIVE): - pod.preprocessor.setup(self, pod) - for pv in vars_to_process: - try: - pv.pod.preprocessor.process(pv.var) - pv.var.stage = varlistentry_util.VarlistEntryStage.PREPROCESSED - except Exception as exc: - update = True - self.log.exception("%s while preprocessing %s: %r", - util.exc_descriptor(exc), pv.var.full_name, exc) - for d_key in pv.var.iter_data_keys(status=core.ObjectStatus.ACTIVE): - pv.var.deactivate_data_key(d_key, exc) - continue - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in preprocess_data() for {self.full_name}." - ) - - def request_data(self): - """Top-level method to iteratively query, fetch and preprocess all data - requested by PODs, switching to alternate requested data as needed. - """ - # Call cleanup method if we're killed - signal.signal(signal.SIGTERM, self.query_and_fetch_cleanup) - signal.signal(signal.SIGINT, self.query_and_fetch_cleanup) - self.pre_query_and_fetch_hook() - try: - self.preprocess_data() - except Exception as exc: - self.log.exception("%s at DataSource level: %r.", - util.exc_descriptor(exc), exc) - # clean up regardless of success/fail - self.post_query_and_fetch_hook() - for p in self.iter_children(): - for v in p.iter_children(): - if v.status == core.ObjectStatus.ACTIVE: - v.log.debug('Data request for %s completed successfully.', - v.full_name) - v.status = core.ObjectStatus.SUCCEEDED - elif v.failed: - v.log.debug('Data request for %s failed.', v.full_name) - else: - v.log.debug('Data request for %s not used.', v.full_name) - if p.failed: - p.log.debug('Data request for %s failed.', p.full_name) - else: - p.log.debug('Data request for %s completed successfully.', - p.full_name) - - def query_and_fetch_cleanup(self, signum=None, frame=None): - """Called if framework is terminated abnormally. Not called during - normal exit. - """ - util.signal_logger(self.__class__.__name__, signum, frame, log=self.log) - self.post_query_and_fetch_hook() - util.exit_handler(code=1) - - -class MultirunDataSourceQFPMixin(DataSourceQFPMixin, ABC): - """Mixin implementing data query, fetch, and preprocessing-related attributes and - methods - """ - - @property - def _children(self): - """Iterable of child objects (:class:`~diagnostic.MultirunDiagnostic`\s) - associated with this object. - No-op because ~MDTFFramework._children provides pod data via the parent parameter - """ - pass - - def iter_vars(self, parent, active=None, pod_active=None): - """Iterator over all :class:`~diagnostic.VarlistEntry`\s (grandchildren) - associated with this case. Returns :class:`PodVarTuple`\s (namedtuples) - of the :class:`~diagnostic.Diagnostic` and :class:`~diagnostic.VarlistEntry` - objects corresponding to the POD and its variable, respectively. - - Args: - parent: the MultirunDiagnostic parent class instance that contains - the pod attributes - active: bool or None, default None. 
Selects subset of - :class:`~diagnostic.VarlistEntry`\s returned in the - namedtuples: - - - active = True: only iterate over currently active VarlistEntries. - - active = False: only iterate over inactive VarlistEntries - (VarlistEntries which have either failed or are currently - unused alternate variables). - - active = None: iterate over both active and inactive - VarlistEntries. - - pod_active: bool or None, default None. Same as *active*, but - filtering the PODs that are selected. - """ - - def _get_kwargs(active_): - if active_ is None: - return {'status': None} - if active_: - return {'status': core.ObjectStatus.ACTIVE} - else: - return {'status_neq': core.ObjectStatus.ACTIVE} - pod_kwargs = _get_kwargs(pod_active) - var_kwargs = _get_kwargs(active) - # for p in parent.iter_children(**pod_kwargs): # _children returns pod values for multirun mode. - # Defined in core.MDTFFramework - p = parent - for v in self.iter_children(**var_kwargs): # _children returns varlist values. Defined in data.sources - yield PodVarTuple(pod=p, var=v) - - def iter_vars_only(self, parent, active=None): - """Convenience wrapper for :meth:`iter_vars` that returns only the - :class:`~MultirunDiagnostic.VarlistEntry` objects (grandchildren) from all PODs - in this DataSource. - """ - yield from (pv.var for pv in self.iter_vars(parent, active=active, pod_active=None)) - - def query_data(self, parent): - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - vars_to_query = [ - v for v in self.iter_vars_only(parent, active=True) \ - if v.stage < varlistentry_util.VarlistEntryStage.QUERIED - ] - if not vars_to_query: - break # exit: queried everything or nothing active - - self.log.debug('Query batch: [%s].', - ', '.join(v.full_name for v in vars_to_query)) - self.pre_query_hook(vars_to_query) - for v in vars_to_query: - try: - self.log.info("Querying %s.", v.translation) - self.query_dataset(v) # sets v.data - if not v.data: - raise util.DataQueryEvent("No data found.", v) - v.stage = varlistentry_util.VarlistEntryStage.QUERIED - except util.DataQueryEvent as exc: - v.deactivate(exc) - continue - except Exception as exc: - chained_exc = util.chain_exc(exc, - f"querying {v.translation} for {v.full_name}.", - util.DataQueryEvent) - v.deactivate(chained_exc) - continue - self.post_query_hook(vars_to_query) - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in query_data() for {self.full_name}." - ) - - def select_data(self, parent): - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - # query alternates for any vars that failed since last time - self.query_data(parent) - update = False - # this loop differs from the others in that logic isn't/can't be - # done on a per-variable basis, so we just try to execute - # set_experiment() successfully - try: - self.set_experiment(parent) - break # successful exit - except util.DataExperimentEvent: - # couldn't set consistent experiment attributes. Try again b/c - # we've deactivated problematic pods/vars. - update = True - except Exception as exc: - self.log.exception("%s while setting experiment: %r", - util.exc_descriptor(exc), exc) - raise exc - else: - # only hit this if we don't break - raise util.DataQueryEvent( - f"Too many iterations in select_data() for {self.full_name}." 
- ) - - def fetch_data(self, parent): - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - self.select_data(parent) - update = False - vars_to_fetch = [ - v for v in self.iter_vars_only(parent, active=True) \ - if v.stage < varlistentry_util.VarlistEntryStage.FETCHED - ] - if not vars_to_fetch: - break # exit: fetched everything or nothing active - - self.log.debug('Fetch batch: [%s].', - ', '.join(v.full_name for v in vars_to_fetch)) - self.pre_fetch_hook(vars_to_fetch) - for v in vars_to_fetch: - try: - v.log.info("Fetching %s.", v) - # fetch on a per-DataKey basis - for d_key in v.iter_data_keys(status=core.ObjectStatus.ACTIVE): - try: - if not self.is_fetch_necessary(d_key): - continue - v.log.debug("Fetching %s.", d_key) - self.fetch_dataset(v, d_key) - except Exception as exc: - update = True - d_key.deactivate(exc) - break # no point continuing - - # check if var received everything - for d_key in v.iter_data_keys(status=core.ObjectStatus.ACTIVE): - if not d_key.local_data: - raise util.DataFetchEvent("Fetch failed.", d_key) - v.stage = varlistentry_util.VarlistEntryStage.FETCHED - except Exception as exc: - update = True - chained_exc = util.chain_exc(exc, - f"fetching data for {v.full_name}.", - util.DataFetchEvent) - v.deactivate(chained_exc) - continue - self.post_fetch_hook(vars_to_fetch) - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in fetch_data() for {self.full_name}." - ) - - def preprocess_data(self, parent): - """Hook to run the preprocessing function on all variables. - """ - update = True - # really a while-loop, but limit # of iterations to be safe - for _ in range(MAX_DATASOURCE_ITERS): - if update: - # fetch alternates for any vars that failed since last time - self.fetch_data(parent) - update = False - vars_to_process = [ - pv for pv in self.iter_vars(parent, active=True) \ - if pv.var.stage < varlistentry_util.VarlistEntryStage.PREPROCESSED - ] - if not vars_to_process: - break # exit: processed everything or nothing active - - for pod in parent.iter_children(status=core.ObjectStatus.ACTIVE): - parent.preprocessor.setup(self, pod) - for pv in vars_to_process: - try: - pv.var.log.info("Preprocessing %s.", pv.var) - parent.preprocessor.process(pv.var, self.name) - # pv.pod.preprocessor.process(pv.var, self.name) - pv.var.stage = varlistentry_util.VarlistEntryStage.PREPROCESSED - except Exception as exc: - update = True - self.log.exception("%s while preprocessing %s: %r", - util.exc_descriptor(exc), pv.var.full_name, exc) - for d_key in pv.var.iter_data_keys(status=core.ObjectStatus.ACTIVE): - pv.var.deactivate_data_key(d_key, exc) - continue - else: - # only hit this if we don't break - raise util.DataRequestError( - f"Too many iterations in preprocess_data() for {self.full_name}." - ) - - def request_data(self, parent): - """Top-level method to iteratively query, fetch and preprocess all data - requested by PODs, switching to alternate requested data as needed. 
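The request_data body that follows registers one cleanup handler for both SIGTERM and SIGINT, so teardown hooks still run if the framework is killed mid-request. A minimal standalone sketch of that pattern (the handler body here is illustrative, not the package's query_and_fetch_cleanup):

import signal
import sys

def cleanup_handler(signum, frame):
    # stand-in for query_and_fetch_cleanup: log, tear down, exit nonzero
    print(f"caught signal {signum}; running teardown")
    sys.exit(1)

signal.signal(signal.SIGTERM, cleanup_handler)
signal.signal(signal.SIGINT, cleanup_handler)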
- """ - # Call cleanup method if we're killed - signal.signal(signal.SIGTERM, self.query_and_fetch_cleanup) - signal.signal(signal.SIGINT, self.query_and_fetch_cleanup) - self.pre_query_and_fetch_hook() - try: - self.preprocess_data(parent) - except Exception as exc: - self.log.exception("%s at DataSource level: %r.", - util.exc_descriptor(exc), exc) - # clean up regardless of success/fail - self.post_query_and_fetch_hook() - for p in self.iter_children(): - for v in p.iter_children(): - if v.status == core.ObjectStatus.ACTIVE: - v.log.debug('Data request for %s completed successfully.', - v.full_name) - v.status = core.ObjectStatus.SUCCEEDED - elif v.failed: - v.log.debug('Data request for %s failed.', v.full_name) - else: - v.log.debug('Data request for %s not used.', v.full_name) - if p.failed: - p.log.debug('Data request for %s failed.', p.full_name) - else: - p.log.debug('Data request for %s completed successfully.', - p.full_name) - diff --git a/src/tests/dummy_config.json b/src/tests/dummy_config.json index a270c85ca..6022802c2 100644 --- a/src/tests/dummy_config.json +++ b/src/tests/dummy_config.json @@ -1,32 +1,38 @@ { - "case_list" : [ + "pod_list" : [ + "example_multicase" + ], + "case_list": { - "CASENAME" : "DUMMY.CASENAME", - "model" : "DUMMY_MODEL", - "convention" : "CF", - "FIRSTYR" : 0, - "LASTYR" : 100, - "pod_list": [ - "example" - ] - } - ], + "TEST_01": + { + "model": "test", + "convention": "CMIP", + "startdate": "00000101", + "enddate": "01001231" + } + , + "TEST_02": + { + "model": "test", + "convention": "CMIP", + "startdate": "01010101120000", + "enddate": "02001231000000" + } + }, "OBS_DATA_ROOT": "/DUMMY/PATH/OBS_DATA_ROOT", "MODEL_DATA_ROOT": "/DUMMY/PATH/MODEL_DATA_ROOT", - "WORKING_DIR": "/DUMMY/PATH/WORKING_DIR", + "WORK_DIR": "/DUMMY/PATH/WORKING_DIR", "OUTPUT_DIR": "/DUMMY/PATH/OUTPUT_DIR", - "data_manager": "Local_File", - "environment_manager": "Conda", + "micromamba_exe": "/DUMMY/PATH/.local/bin/micromamba", "conda_root": "/DUMMY/PATH/conda_root", "conda_env_root": "/DUMMY/PATH/conda_env_root", - "preprocessor": "SampleModelData", "file_transfer_timeout": 300, "save_ps": true, "save_nc": false, "make_variab_tar": false, "overwrite": false, "verbose": 1, - "test_mode": false, - "dry_run": false, - "keep_temp": false + "keep_temp": false, + "translate_data": true } diff --git a/src/tests/shared_test_utils.py b/src/tests/shared_test_utils.py index abcb97b0b..e22d1a910 100644 --- a/src/tests/shared_test_utils.py +++ b/src/tests/shared_test_utils.py @@ -1,10 +1,9 @@ import os -import sys import collections import dataclasses import subprocess -from src.util import read_json, NameSpace, to_iter -from src import core, cli +from src.util import read_json +from src import cli, translation, util @dataclasses.dataclass class DummyMDTFFramework(object): @@ -18,20 +17,12 @@ class DummyMDTFFramework(object): def setUp_config_singletons(config=None, paths=None, pods=None, unittest=True): cwd = os.path.dirname(os.path.realpath(__file__)) code_root = os.path.dirname(os.path.dirname(cwd)) - cli_obj = cli.MDTFTopLevelArgParser( - code_root, - skip_defaults=True, - argv= f"-f {os.path.join(cwd, 'dummy_config.json')}" - ) - cli_obj.config = vars(cli_obj.parse_args()) - if config: - cli_obj.config.update(config) PodDataTuple = collections.namedtuple( 'PodDataTuple', 'sorted_lists pod_data realm_data' ) dummy_pod_data = PodDataTuple( - pod_data=pods, realm_data=dict(), sorted_lists={'pods': [], 'realms':[]} + pod_data=pods, realm_data=dict(), sorted_lists={'pods': [], 
'realms': []} ) _ = core.ConfigManager(cli_obj, dummy_pod_data, unittest=unittest) @@ -39,50 +30,42 @@ def setUp_config_singletons(config=None, paths=None, pods=None, unittest=True): pm.CODE_ROOT = code_root if paths: pm.update(paths) - translate = core.VariableTranslator(code_root, unittest=unittest) + translate = translation.VariableTranslator(code_root, unittest=unittest) translate.read_conventions(code_root, unittest=unittest) - _ = core.TempDirManager(None, unittest=unittest) + _ = util.TempDirManager(None, unittest=unittest) + def tearDown_config_singletons(): # clear Singletons try: - temp = core.ConfigManager(unittest=True) - temp._reset() - except Exception: - pass - try: - temp = core.PathManager(unittest=True) + temp = util.ModelDataPathManager(config, unittest=True) temp._reset() - except Exception: + except Exception as exc: + print(exc) pass try: - temp = core.VariableTranslator(unittest=True) + temp = translation.VariableTranslator(config.CODE_ROOT, unittest=True) temp._reset() - except Exception: - pass - try: - temp = core.TempDirManager(unittest=True) - temp._reset() - except Exception: + except Exception as exc: + print(exc) pass # ------------------------------------------------------------- + def get_configuration(config_file='', check_input=False, check_output=False): # Redundant with code in util; need to fix this - cwd = os.path.dirname(os.path.realpath(__file__)) # gets dir of currently executing script - code_root = os.path.realpath(os.path.join(cwd, '..')) # parent dir of that + cwd = os.path.dirname(os.path.realpath(__file__)) # gets dir of currently executing script + code_root = os.path.realpath(os.path.join(cwd, '..')) # parent dir of that if config_file == '': - config_file = os.path.join(cwd,'..','src','mdtf_settings.json') # default + config_file = os.path.join(cwd, '..', 'src', 'mdtf_settings.json') # default config = read_json(config_file) - config = parse_mdtf_args(None, config, rel_paths_root=code_root) - config['paths']['md5_path'] = os.path.join(cwd,'checksums') + config['paths']['md5_path'] = os.path.join(cwd, 'checksums') # config['paths']['OBS_ROOT_DIR'] = os.path.realpath(config['paths']['OBS_ROOT_DIR']) # config['paths']['MODEL_ROOT_DIR'] = os.path.realpath(config['paths']['MODEL_ROOT_DIR']) # config['paths']['OUTPUT_DIR'] = os.path.realpath(config['paths']['OUTPUT_DIR']) - # assert os.path.isdir(config['paths']['md5_path']) # if check_input: # assert os.path.isdir(config['paths']['OBS_ROOT_DIR']) @@ -91,9 +74,10 @@ def get_configuration(config_file='', check_input=False, check_output=False): # assert os.path.isdir(config['paths']['OUTPUT_DIR']) return config + def get_test_data_configuration(): - cwd = os.path.dirname(os.path.realpath(__file__)) # gets dir of currently executing script - case_list = read_json(os.path.join(cwd,'pod_test_configs.json')) + cwd = os.path.dirname(os.path.realpath(__file__)) # gets dir of currently executing script + case_list = read_json(os.path.join(cwd, 'pod_test_configs.json')) models = [] pods = [] for i, case in enumerate(case_list['case_list']): @@ -105,6 +89,7 @@ def get_test_data_configuration(): case_list['models'] = models return case_list + def configure_pods(case_list, config_to_insert=[]): # set up configuration, one for each POD cases_by_pod = {} @@ -117,8 +102,9 @@ def configure_pods(case_list, config_to_insert=[]): cases_by_pod[pod]['case_list'][0]['pod_list'] = [pod] return cases_by_pod + def checksum_function(file_path): - IMAGE_FILES = ['.eps','.ps','.png','.gif','.jpg','.jpeg'] + IMAGE_FILES = 
['.eps', '.ps', '.png', '.gif', '.jpg', '.jpeg'] print(os.path.split(file_path)[1]) ext = os.path.splitext(file_path)[1] @@ -126,14 +112,15 @@ def checksum_function(file_path): # use ImageMagick 'identify' command which ignores file creation time # metadata in image header file and only hashes actual image data. # See https://stackoverflow.com/a/41706704 - checksum = subprocess.check_output('identify -format "%#" '+file_path, shell=True) + checksum = subprocess.check_output('identify -format "%#" ' + file_path, shell=True) checksum = checksum.split('\n')[0] else: # fallback method: system md5 - checksum = subprocess.check_output('md5sum '+file_path, shell=True) + checksum = subprocess.check_output('md5sum ' + file_path, shell=True) checksum = checksum.split(' ')[0] return checksum + def checksum_files_in_subtree(dir, exclude_exts=[]): start_cwd = os.getcwd() checksum_dict = {} @@ -143,12 +130,13 @@ def checksum_files_in_subtree(dir, exclude_exts=[]): files = files.split('\n') # f[2:] removes the "./" at the beginning of each entry files = [f[2:] for f in files if - f != '' and (os.path.splitext(f)[1] not in exclude_exts)] + f != '' and (os.path.splitext(f)[1] not in exclude_exts)] for f in files: checksum_dict[f] = checksum_function(os.path.join(dir, f)) os.chdir(start_cwd) return checksum_dict + def generate_checksum_test(name, path, reference_dict, include_exts=[]): def test(self): self.assertIn(name, reference_dict) @@ -159,10 +147,10 @@ def test(self): ext = os.path.splitext(key)[1] if include_exts == [] or (ext in include_exts): self.assertIn(key, test_dict, - 'Failure: {} not found in {}'.format( - key, os.path.join(path, name))) + 'Failure: {} not found in {}'.format( + key, os.path.join(path, name))) self.assertEqual(test_dict[key], reference_dict[name][key], - 'Failure: Hash of {} differs from reference.'.format( - os.path.join(path, name, key))) - return test + 'Failure: Hash of {} differs from reference.'.format( + os.path.join(path, name, key))) + return test diff --git a/src/tests/test_data_manager.py b/src/tests/test_data_manager.py index d6b7e525b..e1a204d57 100644 --- a/src/tests/test_data_manager.py +++ b/src/tests/test_data_manager.py @@ -1,13 +1,15 @@ import os import unittest -import unittest.mock as mock # define mock os.environ so we don't mess up real env vars +import unittest.mock as mock # define mock os.environ so we don't mess up real env vars import src.util as util -from src.diagnostic import Diagnostic +from src import translation, data_sources, pod_setup + + # from src.data_manager import DataManager # from src.tests.shared_test_utils import setUp_ConfigManager, tearDown_ConfigManager @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") -#@mock.patch.multiple(DataManager, __abstractmethods__=set()) +# @mock.patch.multiple(DataManager, __abstractmethods__=set()) class TestDataManagerSetup(unittest.TestCase): # pylint: disable=abstract-class-instantiated default_case = { @@ -15,26 +17,22 @@ class TestDataManagerSetup(unittest.TestCase): 'pod_list': ['C'] } default_pod_CF = { - 'settings':{}, - 'varlist':[{'var_name': 'pr_var', 'freq':'mon'}] - } + 'settings': {}, + 'varlist': [{'var_name': 'pr_var', 'freq': 'mon'}] + } dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 
'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) def setUp(self, mock_read_json): - setUp_ConfigManager( - config=self.default_case, - paths=self.dummy_paths, - pods={'C': self.default_pod_CF} - ) - _ = configs.VariableTranslator(unittest = True) + + _ = translation.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -48,9 +46,12 @@ def test_setup_model_paths(self): @unittest.expectedFailure def test_set_model_env_vars(self): # set env vars for model - case = DataManager(self.default_case) + case = data_sources.data_source[case_dict.convention.upper() + "DataSource"](case_name, + case_dict, + model_paths, + parent=None) case.convention = 'not_CF' - dummy = {'envvars':{}} + dummy = {'envvars': {}} case._set_model_env_vars(dummy) self.assertEqual(os.environ['pr_var'], 'PRECT') self.assertEqual(os.environ['prc_var'], 'PRECC') @@ -66,7 +67,10 @@ def test_setup_html(self): pass def test_setup_pod_cf_cf(self): - case = DataManager(self.default_case) + case = data_sources.data_source[case_dict.convention.upper() + "DataSource"](case_name, + case_dict, + model_paths, + parent=None) pod = Diagnostic('C') case._setup_pod(pod) self.assertEqual(pod.varlist[0].CF_name, 'pr_var') @@ -80,8 +84,9 @@ def test_setup_pod_cf_custom(self): self.assertEqual(pod.varlist[0].CF_name, 'pr_var') self.assertEqual(pod.varlist[0].name_in_model, 'PRECT') + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") -#@mock.patch.multiple(DataManager, __abstractmethods__=set()) +# @mock.patch.multiple(DataManager, __abstractmethods__=set()) class TestDataManagerSetupNonCFPod(unittest.TestCase): # pylint: disable=abstract-class-instantiated @@ -90,16 +95,16 @@ class TestDataManagerSetupNonCFPod(unittest.TestCase): 'pod_list': ['C'] } default_pod_not_CF = { - 'settings': {'variable_convention':'not_CF'}, - 'varlist': [{'var_name': 'PRECT', 'freq':'mon'}] - } + 'settings': {'variable_convention': 'not_CF'}, + 'varlist': [{'var_name': 'PRECT', 'freq': 'mon'}] + } dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) @@ -109,7 +114,7 @@ def setUp(self, mock_read_json): paths=self.dummy_paths, pods={'C': self.default_pod_not_CF} ) - _ = configs.VariableTranslator(unittest = True) + _ = configs.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -131,8 +136,8 @@ def test_setup_pod_custom_custom(self): self.assertEqual(pod.varlist[0].name_in_model, 'PRECT') @unittest.skip("") - @mock.patch('src.diagnostic.util.read_json', return_value = { - 'settings':{'conda_env':'B'},'varlist':[]}) + @mock.patch('src.diagnostic.util.read_json', return_value={ + 'settings': {'conda_env': 'B'}, 'varlist': []}) def test_parse_pod_settings_conda_env(self, mock_read_json): # fill in conda environment pod = Diagnostic('A') @@ -142,22 +147,22 @@ def test_parse_pod_settings_conda_env(self, mock_read_json): @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class 
TestDataManagerFetchData(unittest.TestCase): @mock.patch('src.util.read_json', - return_value = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} - }) + return_value={ + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} + }) def setUp(self, mock_read_json): # set up translation dictionary without calls to filesystem - _ = util.VariableTranslator(unittest = True) - _ = util.PathManager(unittest = True) + _ = util.VariableTranslator(unittest=True) + _ = util.PathManager(unittest=True) def tearDown(self): # call _reset method deleting clearing Translator for unit testing, # otherwise the second, third, .. tests will use the instance created # in the first test instead of being properly initialized - temp = util.VariableTranslator(unittest = True) + temp = util.VariableTranslator(unittest=True) temp._reset() - temp = util.PathManager(unittest = True) + temp = util.PathManager(unittest=True) temp._reset() # --------------------------------------------------- @@ -168,4 +173,4 @@ def tearDown(self): if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/src/tests/test_diagnostic.py b/src/tests/test_diagnostic.py index a9c72ab7f..36e553758 100644 --- a/src/tests/test_diagnostic.py +++ b/src/tests/test_diagnostic.py @@ -1,27 +1,25 @@ import os -import sys import unittest -import unittest.mock as mock # define mock os.environ so we don't mess up real env vars +import unittest.mock as mock # define mock os.environ so we don't mess up real env vars import src.util as util -#from src.data_manager import DataSet, DataManager +from src import translation, pod_setup from src.util.datelabel import DateFrequency -# from src.diagnostic import Diagnostic, PodRuntimeError -# from src.tests.shared_test_utils import setUp_ConfigManager, tearDown_ConfigManager + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class TestDiagnosticInit(unittest.TestCase): # pylint: disable=maybe-no-member default_pod_CF = { - 'settings':{}, - 'varlist':[{'var_name': 'pr_var', 'freq':'mon'}] - } + 'settings': {}, + 'varlist': [{'var_name': 'pr_var', 'freq': 'mon'}] + } dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) @@ -30,7 +28,7 @@ def setUp(self, mock_read_json): paths=self.dummy_paths, pods={'DUMMY_POD': self.default_pod_CF} ) - _ = configs.VariableTranslator(unittest = True) + _ = translation.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -40,7 +38,7 @@ def tearDown(self): def test_parse_pod_settings(self): # normal operation config = configs.ConfigManager(unittest=True) - config.pods['DUMMY_POD'] = {'settings':{'required_programs':'B'},'varlist':[]} + config.pods['DUMMY_POD'] = {'settings': {'required_programs': 'B'}, 'varlist': []} pod = Diagnostic('DUMMY_POD') self.assertEqual(pod.name, 'DUMMY_POD') self.assertEqual(pod.required_programs, 'B') @@ -49,8 +47,8 @@ def test_parse_pod_varlist(self): # normal operation config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 
'settings':{},'varlist':[{ - 'var_name': 'pr_var', 'freq':'mon', 'requirement':'required' + 'settings': {}, 'varlist': [{ + 'var_name': 'pr_var', 'freq': 'mon', 'requirement': 'required' }] } pod = Diagnostic('DUMMY_POD') @@ -60,15 +58,15 @@ def test_parse_pod_varlist_defaults(self): # fill in defaults config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{},'varlist':[{ - 'var_name': 'pr_var', 'freq':'mon', 'alternates':'foo' + 'settings': {}, 'varlist': [{ + 'var_name': 'pr_var', 'freq': 'mon', 'alternates': 'foo' }] } test_ds = DataSet({ - 'name':'foo', 'freq':'mon', - 'CF_name':'foo', 'required': True, - 'original_name':'pr_var', 'alternates':[] - }) + 'name': 'foo', 'freq': 'mon', + 'CF_name': 'foo', 'required': True, + 'original_name': 'pr_var', 'alternates': [] + }) pod = Diagnostic('DUMMY_POD') self.assertEqual(pod.varlist[0]['required'], True) self.assertEqual(len(pod.varlist[0]['alternates']), 1) @@ -77,27 +75,28 @@ def test_parse_pod_varlist_defaults(self): def test_parse_pod_varlist_freq(self): config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{},'varlist':[{ - 'var_name': 'pr_var', 'freq':'not_a_frequency' + 'settings': {}, 'varlist': [{ + 'var_name': 'pr_var', 'freq': 'not_a_frequency' }] } self.assertRaises(AssertionError, Diagnostic, 'A') + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class TestDiagnosticSetUp(unittest.TestCase): # pylint: disable=maybe-no-member - default_pod = {'settings':{}, 'varlist':[]} + default_pod = {'settings': {}, 'varlist': []} default_case = { 'CASENAME': 'A', 'model': 'B', 'FIRSTYR': 1900, 'LASTYR': 2100, 'pod_list': ['C'] } dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) @@ -107,7 +106,7 @@ def setUp(self, mock_read_json): paths=self.dummy_paths, pods={'DUMMY_POD': self.default_pod} ) - _ = configs.VariableTranslator(unittest = True) + _ = configs.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -116,7 +115,7 @@ def tearDown(self): @unittest.skip("") # @mock.patch.multiple(DataManager, __abstractmethods__=set()) - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) def test_set_pod_env_vars_paths(self, mock_exists): # check definition of pod paths case = DataManager(self.default_case) @@ -126,68 +125,66 @@ def test_set_pod_env_vars_paths(self, mock_exists): pod._set_pod_env_vars() self.assertEqual(pod.pod_env_vars['POD_HOME'], 'TEST_CODE_ROOT/diagnostics/C') self.assertEqual(pod.pod_env_vars['OBS_DATA'], 'TEST_OBS_DATA_ROOT/C') - self.assertEqual(pod.pod_env_vars['WK_DIR'], 'A') + self.assertEqual(pod.pod_env_vars['WORK_DIR'], 'A') @mock.patch('src.util.check_dir') - @mock.patch('os.path.exists', return_value = False) + @mock.patch('os.path.exists', return_value=False) @mock.patch('os.makedirs') def test_setup_pod_directories_mkdir(self, mock_makedirs, mock_exists, \ - mock_check_dirs): + mock_check_dirs): # create output dirs if not present pod = Diagnostic('DUMMY_POD') pod.POD_WK_DIR = 'A/B' 
pod._setup_pod_directories() mock_makedirs.assert_has_calls([ - mock.call('A/B/'+ s) for s in [ - '','model','model/PS','model/netCDF','obs','obs/PS','obs/netCDF' + mock.call('A/B/' + s) for s in [ + '', 'model', 'model/PS', 'model/netCDF', 'obs', 'obs/PS', 'obs/netCDF' ] - ], any_order = True) + ], any_order=True) - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) @mock.patch('os.makedirs') def test_setup_pod_directories_no_mkdir(self, mock_makedirs, mock_exists): # don't create output dirs if already present - pod = Diagnostic('DUMMY_POD') - pod.POD_WK_DIR = 'A' + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) + pod.paths.POD_WORK_DIR = 'A' pod._setup_pod_directories() mock_makedirs.assert_not_called() - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) def test_check_pod_driver_no_driver_1(self, mock_exists): # fill in driver from pod name programs = util.get_available_programs() - pod = Diagnostic('DUMMY_POD') + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) pod.set_entry_point() ext = os.path.splitext(pod.driver)[1][1:] self.assertTrue(ext in programs) self.assertEqual(pod.program, programs[ext]) - @mock.patch('os.path.exists', return_value = False) + @mock.patch('os.path.exists', return_value=False) def test_check_pod_driver_no_driver_2(self, mock_exists): # assertion fails if no driver found - pod = Diagnostic('DUMMY_POD') - self.assertRaises(PodRuntimeError, pod.set_entry_point) + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) + self.assertRaises(util.PodRuntimeError, pod.set_entry_point) + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class TestDiagnosticCheckVarlist(unittest.TestCase): # pylint: disable=maybe-no-member - default_pod = {'settings':{}, 'varlist':[]} + default_pod = {'settings': {}, 'varlist': []} dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) def setUp(self, mock_read_json): - setUp_ConfigManager( - paths=self.dummy_paths, - pods={'DUMMY_POD': self.default_pod} - ) - _ = configs.VariableTranslator(unittest = True) + + _ = translation.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -196,8 +193,8 @@ def tearDown(self): def _populate_pod__local_data(self, pod): # reproduce logic in DataManager._setup_pod rather than invoke it here - config = configs.ConfigManager(unittest = True) - translate = configs.VariableTranslator(unittest = True) + config = configs.ConfigManager(unittest=True) + translate = translation.VariableTranslator(unittest=True) case_name = 'A' ds_list = [] @@ -215,75 +212,76 @@ def _populate_pod__local_data(self, pod): case_name, var.name_in_model, freq) ) - @mock.patch('os.path.isfile', return_value = True) + @mock.patch('os.path.isfile', return_value=True) def test_check_for_varlist_files_found(self, mock_isfile): # case file is found config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{}, 'varlist':[ - {'var_name': 'pr_var', 'freq':'mon'} - ]} - pod = Diagnostic('DUMMY_POD') + 'settings': {}, 
'varlist': [ + {'var_name': 'pr_var', 'freq': 'mon'} + ]} + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) self._populate_pod__local_data(pod) (found, missing) = pod._check_for_varlist_files(pod.varlist) self.assertEqual(found, ['TEST_MODEL_DATA_ROOT/A/mon/A.PRECT.mon.nc']) self.assertEqual(missing, []) - @mock.patch('os.path.isfile', return_value = False) + @mock.patch('os.path.isfile', return_value=False) def test_check_for_varlist_files_not_found(self, mock_isfile): # case file is required and not found config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{}, 'varlist':[ - {'var_name': 'pr_var', 'freq':'mon', 'required': True} - ]} - pod = Diagnostic('DUMMY_POD') + 'settings': {}, 'varlist': [ + {'var_name': 'pr_var', 'freq': 'mon', 'required': True} + ]} + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) self._populate_pod__local_data(pod) (found, missing) = pod._check_for_varlist_files(pod.varlist) self.assertEqual(found, []) self.assertEqual(missing, ['TEST_MODEL_DATA_ROOT/A/mon/A.PRECT.mon.nc']) - @mock.patch('os.path.isfile', side_effect = [False, True]) + @mock.patch('os.path.isfile', side_effect=[False, True]) def test_check_for_varlist_files_optional(self, mock_isfile): # case file is optional and not found config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{}, 'varlist':[ - {'var_name': 'pr_var', 'freq':'mon', 'required': False} - ]} - pod = Diagnostic('DUMMY_POD') + 'settings': {}, 'varlist': [ + {'var_name': 'pr_var', 'freq': 'mon', 'required': False} + ]} + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) self._populate_pod__local_data(pod) (found, missing) = pod._check_for_varlist_files(pod.varlist) self.assertEqual(found, []) self.assertEqual(missing, []) - @mock.patch('os.path.isfile', side_effect = [False, True]) + @mock.patch('os.path.isfile', side_effect=[False, True]) def test_check_for_varlist_files_alternate(self, mock_isfile): # case alternate variable is specified and found config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{}, 'varlist':[ - {'var_name': 'pr_var', 'freq':'mon', - 'required': True, 'alternates':['prc_var']} - ]} - pod = Diagnostic('DUMMY_POD') + 'settings': {}, 'varlist': [ + {'var_name': 'pr_var', 'freq': 'mon', + 'required': True, 'alternates': ['prc_var']} + ]} + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) self._populate_pod__local_data(pod) (found, missing) = pod._check_for_varlist_files(pod.varlist) # name_in_model translation now done in DataManager._setup_pod self.assertEqual(found, ['TEST_MODEL_DATA_ROOT/A/mon/A.PRECC.mon.nc']) self.assertEqual(missing, []) + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class TestDiagnosticSetUpCustomSettings(unittest.TestCase): # pylint: disable=maybe-no-member - default_pod = {'settings':{}, 'varlist':[]} + default_pod = {'settings': {}, 'varlist': []} dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) @@ -292,60 +290,61 @@ def setUp(self, mock_read_json): paths=self.dummy_paths, pods={'DUMMY_POD': self.default_pod} ) -
_ = configs.VariableTranslator(unittest = True) + _ = translation.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() # --------------------------------------------------- - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) def test_set_pod_env_vars_vars(self, mock_exists): # check definition of additional env vars config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{'pod_env_vars':{'D':'E'}}, 'varlist':[] + 'settings': {'pod_env_vars': {'D': 'E'}}, 'varlist': [] } - pod = Diagnostic('DUMMY_POD') - pod.POD_WK_DIR = 'A' + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) + pod.paths.POD_WORK_DIR = 'A' pod._set_pod_env_vars() self.assertEqual(os.environ['D'], 'E') self.assertEqual(pod.pod_env_vars['D'], 'E') @unittest.skip("") - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) def test_check_pod_driver_program(self, mock_exists): # fill in absolute path and fill in program from driver's extension config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{'driver':'C.ncl'}, 'varlist':[] + 'settings': {'driver': 'C.ncl'}, 'varlist': [] } - pod = Diagnostic('DUMMY_POD') + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) pod.set_entry_point() self.assertEqual(pod.driver, 'TEST_CODE_ROOT/diagnostics/A/C.ncl') self.assertEqual(pod.program, 'ncl') - @mock.patch('os.path.exists', return_value = True) + @mock.patch('os.path.exists', return_value=True) def test_check_pod_driver_no_program_1(self, mock_exists): # assertion fail if can't recognize driver's extension config = configs.ConfigManager(unittest=True) config.pods['DUMMY_POD'] = { - 'settings':{'driver':'C.foo'}, 'varlist':[] + 'settings': {'driver': 'C.foo'}, 'varlist': [] } - pod = Diagnostic('DUMMY_POD') - self.assertRaises(PodRuntimeError, pod.set_entry_point) + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) + self.assertRaises(util.PodRuntimeError, pod.set_entry_point) + @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") class TestDiagnosticTearDown(unittest.TestCase): # pylint: disable=maybe-no-member - default_pod = {'settings':{}, 'varlist':[]} + default_pod = {'settings': {}, 'varlist': []} dummy_paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORKING_DIR': 'D', 'OUTPUT_DIR': 'E' } dummy_var_translate = { - 'convention_name':'not_CF', - 'var_names':{'pr_var': 'PRECT', 'prc_var':'PRECC'} + 'convention_name': 'not_CF', + 'var_names': {'pr_var': 'PRECT', 'prc_var': 'PRECC'} } @mock.patch('src.configs.util.read_json', return_value=dummy_var_translate) @@ -354,7 +353,7 @@ def setUp(self, mock_read_json): paths=self.dummy_paths, pods={'DUMMY_POD': self.default_pod} ) - _ = configs.VariableTranslator(unittest = True) + _ = translation.VariableTranslator(unittest=True) def tearDown(self): tearDown_ConfigManager() @@ -364,17 +363,17 @@ def tearDown(self): # expected to fail because error will be raised about missing TEMP_HTML # attribute, which is set when PODs are initialized by data_manager @unittest.expectedFailure - @mock.patch.dict('os.environ', {'CASENAME':'C'}) - @mock.patch('os.path.exists', return_value = True) + @mock.patch.dict('os.environ', {'CASENAME': 'C'}) + @mock.patch('os.path.exists', return_value=True) @mock.patch('shutil.copy2') @mock.patch('os.system') 
@mock.patch('os.remove') @mock.patch('src.util.append_html_template') def test_make_pod_html(self, mock_append_html_template, mock_remove, \ - mock_system, mock_copy2, mock_exists): - pod = Diagnostic('DUMMY_POD') - pod.MODEL_WK_DIR = '/B' - pod.POD_WK_DIR = '/B/DUMMY_POD' + mock_system, mock_copy2, mock_exists): + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) + pod.paths.MODEL_WORK_DIR = '/B' + pod.paths.POD_WORK_DIR = '/B/DUMMY_POD' pod._make_pod_html() mock_copy2.assert_has_calls([ mock.call('TEST_CODE_ROOT/diagnostics/A/A.html', '/B/A'), @@ -388,14 +387,14 @@ def test_make_pod_html(self, mock_append_html_template, mock_remove, \ @unittest.skip("") @mock.patch.dict('os.environ', { - 'convert_flags':'-C', 'convert_output_fmt':'png' - }) - @mock.patch('glob.glob', return_value = ['A/model/PS/B.ps']) + 'convert_flags': '-C', 'convert_output_fmt': 'png' + }) + @mock.patch('glob.glob', return_value=['A/model/PS/B.ps']) @mock.patch('subprocess.Popen') def test_convert_pod_figures(self, mock_subprocess, mock_glob): # assert we munged filenames correctly config = configs.ConfigManager(unittest=True) - pod = Diagnostic('DUMMY_POD') + pod = pod_setup.PodObject('DUMMY_POD', ctx.config) pod.POD_WK_DIR = 'A' pod._convert_pod_figures(config) mock_system.assert_has_calls([ @@ -407,5 +406,6 @@ def test_convert_pod_figures(self, mock_subprocess, mock_glob): def test_cleanup_pod_files(self): pass + if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/src/tests/test_environment_manager.py b/src/tests/test_environment_manager.py deleted file mode 100644 index e3867ddec..000000000 --- a/src/tests/test_environment_manager.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import sys -import unittest -import unittest.mock as mock # define mock os.environ so we don't mess up real env vars -import src.util as util -from src.environment_manager import SubprocessRuntimeManager - -class TestEnvironmentManager(unittest.TestCase): - test_config = {'case_list':[{}], 'pod_list':['X']} - - # --------------------------------------------------- - - def test_setUp(self): - pass #TODO - - # --------------------------------------------------- - - # @mock.patch.dict('os.environ', {'DIAG_HOME':'/HOME'}) - # @mock.patch('src.diagnostic.util.read_json', return_value = { - # 'settings':{'driver':'C.ncl', 'program':'nonexistent_program'}, 'varlist':[] - # }) - # @mock.patch('os.path.exists', return_value = True) - # def test_check_pod_driver_no_program_2(self, mock_exists, mock_read_json): - # # assertion fail if explicitly specified program not found - # pod = Diagnostic('A') - # self.assertRaises(AssertionError, pod.set_entry_point) - - # --------------------------------------------------- - - def test_run(self): - pass #TODO - -# --------------------------------------------------- - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/src/tests/test_mdtf.py b/src/tests/test_mdtf.py deleted file mode 100644 index 58083483d..000000000 --- a/src/tests/test_mdtf.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import unittest -import unittest.mock as mock # define mock os.environ so we don't mess up real env vars -from src.core import MDTFFramework -import src.util as util - -@unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") -class TestMDTFArgParsing(unittest.TestCase): - def setUp(self): - _ = configs.PathManager(unittest = True) - self.config_test = { - 'case_list':[{'A':'B'}], - 'paths':{'C':'/D'}, -
'settings':{'E':'F', 'verbose':0} - } - - def tearDown(self): - # call _reset method deleting clearing PathManager for unit testing, - # otherwise the second, third, .. tests will use the instance created - # in the first test instead of being properly initialized - temp = configs.PathManager(unittest = True) - temp._reset() - - def test_parse_mdtf_args_config(self): - # set paths from config file - args = {} - config = self.config_test.copy() - config = MDTFFramework.parse_mdtf_args(args, config) - self.assertEqual(config['paths']['C'], '/D') - self.assertEqual(config['settings']['E'], 'F') - - def test_parse_mdtf_args_config_cmdline(self): - # override config file with command line arguments - args = {'C':'/X', 'E':'Y'} - config = self.config_test.copy() - config = MDTFFramework.parse_mdtf_args(args, config) - self.assertEqual(config['paths']['C'], '/X') - self.assertEqual(config['settings']['E'], 'Y') - - @mock.patch('src.util.check_dir') - def test_set_mdtf_env_vars_config_settings(self, mock_check_dirs): - # NB env vars now only written to OS by pod's setup (not here) - # set settings from config file - mdtf = MDTFFramework.__new__(MDTFFramework) - mdtf.config = self.config_test.copy() - mdtf.set_mdtf_env_vars() - self.assertEqual(mdtf.config['envvars']['E'], 'F') - - @mock.patch('src.util.check_dir') - def test_sset_mdtf_env_vars_config_rgb(self, mock_check_dirs): - # NB env vars now only written to OS by pod's setup (not here) - # set path to /RGB from os.environ - mdtf = MDTFFramework.__new__(MDTFFramework) - mdtf.config = self.config_test.copy() - mdtf.set_mdtf_env_vars() - self.assertEqual(mdtf.config['envvars']['RGB'], 'TEST_CODE_ROOT/src/rgb') - -# --------------------------------------------------- - -if __name__ == '__main__': - unittest.main() diff --git a/src/tests/test_core.py b/src/tests/test_translation.py similarity index 64% rename from src/tests/test_core.py rename to src/tests/test_translation.py index f4baad2b9..2e737f9e4 100644 --- a/src/tests/test_core.py +++ b/src/tests/test_translation.py @@ -1,12 +1,7 @@ import os import unittest -from collections import namedtuple -import itertools -import unittest.mock as mock # define mock os.environ so we don't mess up real env vars -import src.core as core -# from src.data_manager import DataManager -import src.diagnostic as diagnostic from src.tests.shared_test_utils import setUp_config_singletons, tearDown_config_singletons +from src import data_sources, translation, varlist_util, pod_setup, util class TestVariableTranslator(unittest.TestCase): @@ -24,59 +19,63 @@ def tearDown(self): "PLACEHOLDER_X_COORD": {"axis": "X", "standard_name": "longitude", "units": "degrees_east"}, "PLACEHOLDER_Y_COORD": {"axis": "Y", "standard_name": "latitude", "units": "degrees_north"}, "PLACEHOLDER_Z_COORD": { - "standard_name": "air_pressure", - "units": "hPa", - "positive": "down", - "axis": "Z" + "standard_name": "air_pressure", + "units": "hPa", + "positive": "down", + "axis": "Z" }, "PLACEHOLDER_T_COORD": {"axis": "T", "standard_name": "time", "units": "days"} } def test_variabletranslator(self): - temp = core.VariableTranslator() + temp = translation.VariableTranslator() temp.add_convention({ - 'name':'not_CF', 'coords': self._dummy_coords_d, - 'variables':{ + 'name': 'not_CF', 'coords': self._dummy_coords_d, + 'variables': { 'PRECT': {"standard_name": "pr_var", "units": "1", "ndim": 3}, 'PRECC': {"standard_name": "prc_var", "units": "1", "ndim": 3} } - }) + }, + "") self.assertEqual(temp.to_CF_name('not_CF', 'PRECT'), 'pr_var') - 
self.assertEqual(temp.from_CF_name('not_CF', 'pr_var'), 'PRECT') + self.assertEqual(temp.from_CF_name('not_CF', 'pr_var', 'atmos'), 'PRECT') def test_variabletranslator_no_key(self): - temp = core.VariableTranslator() + temp = translation.VariableTranslator() temp.add_convention({ - 'name':'not_CF', 'coords': self._dummy_coords_d, - 'variables':{ + 'name': 'not_CF', 'coords': self._dummy_coords_d, + 'variables': { 'PRECT': {"standard_name": "pr_var", "units": "1", "ndim": 3}, 'PRECC': {"standard_name": "prc_var", "units": "1", "ndim": 3} } - }) + }, + "") self.assertRaises(KeyError, temp.to_CF_name, 'B', 'PRECT') self.assertRaises(KeyError, temp.to_CF_name, 'not_CF', 'nonexistent_var') - self.assertRaises(KeyError, temp.from_CF_name, 'B', 'PRECT') - self.assertRaises(KeyError, temp.from_CF_name, 'not_CF', 'nonexistent_var') + self.assertRaises(KeyError, temp.from_CF_name, 'B', 'PRECT', 'atmos') + self.assertRaises(KeyError, temp.from_CF_name, 'not_CF', 'nonexistent_var', 'blah') def test_variabletranslator_aliases(self): # create multiple entries when multiple models specified - temp = core.VariableTranslator() + temp = translation.VariableTranslator() temp.add_convention({ - 'name':'not_CF', 'coords': self._dummy_coords_d, + 'name': 'not_CF', 'coords': self._dummy_coords_d, 'models': ['A', 'B'], - 'variables':{ + 'variables': { 'PRECT': {"standard_name": "pr_var", "units": "1", "ndim": 3}, 'PRECC': {"standard_name": "prc_var", "units": "1", "ndim": 3} } - }) - self.assertEqual(temp.from_CF_name('not_CF', 'pr_var'), 'PRECT') - self.assertEqual(temp.from_CF_name('A','pr_var'), 'PRECT') - self.assertEqual(temp.from_CF_name('B','pr_var'), 'PRECT') + }, + "") + self.assertEqual(temp.from_CF_name('not_CF', 'pr_var', 'atmos'), 'PRECT') + self.assertEqual(temp.from_CF_name('A', 'pr_var', 'atmos'), 'PRECT') + self.assertEqual(temp.from_CF_name('B', 'pr_var', 'atmos'), 'PRECT') def test_variabletranslator_no_translation(self): dummy_varlist = { "data": { - "frequency": "day" + "frequency": "day", + "realm": "atmos" }, "dimensions": { "lat": {"standard_name": "latitude"}, @@ -91,9 +90,9 @@ def test_variabletranslator_no_translation(self): } } } - varlist = diagnostic.Varlist.from_struct(dummy_varlist, parent=None) + varlist = varlist_util.Varlist.from_struct(dummy_varlist) ve = varlist.vars[0] - translate = core.VariableTranslator().get_convention('None') + translate = translation.VariableTranslator().get_convention('None') tve = translate.translate(ve) self.assertEqual(ve.name, tve.name) self.assertEqual(ve.standard_name, tve.standard_name) @@ -142,12 +141,14 @@ def test_variabletranslator_bad_modifier(self): # test that supported modifier atmos_height is correct raised = False try: - varlist = diagnostic.Varlist.from_struct(dummy_varlist_correct, parent=None) - except Exception: + varlist = varlist_util.Varlist.from_struct(dummy_varlist_correct) + except Exception as exc: + print(exc) raised = True self.assertFalse(raised) # test that incorrect modifier height throws an error - self.assertRaises(ValueError, diagnostic.Varlist.from_struct, dummy_varlist_wrong, parent=None) + self.assertRaises(ValueError, varlist_util.Varlist.from_struct, dummy_varlist_wrong, parent=None) + class TestVariableTranslatorFiles(unittest.TestCase): def tearDown(self): @@ -162,9 +163,10 @@ def test_variabletranslator_load_files(self): code_root = os.path.dirname(os.path.dirname(cwd)) raised = False try: - translate = core.VariableTranslator(code_root, unittest=False) + translate = translation.VariableTranslator(code_root, 
unittest=False) translate.read_conventions(code_root, unittest=False) - except Exception: + except Exception as exc: + print(exc) raised = True self.assertFalse(raised) self.assertIn('CMIP', translate.conventions) @@ -174,18 +176,19 @@ def test_variabletranslator_real_data(self): # run in non-unit-test mode to test loading of config files cwd = os.path.dirname(os.path.realpath(__file__)) code_root = os.path.dirname(os.path.dirname(cwd)) - translate = core.VariableTranslator(code_root, unittest=False) + translate = translation.VariableTranslator(code_root, unittest=False) translate.read_conventions(code_root, unittest=False) self.assertEqual(translate.to_CF_name('NCAR', 'PRECT'), "precipitation_rate") - self.assertEqual(translate.from_CF_name('CMIP', 'toa_outgoing_longwave_flux'), "rlut") + self.assertEqual(translate.from_CF_name('CMIP', 'toa_outgoing_longwave_flux', + 'atmos'), "rlut") + class TestPathManager(unittest.TestCase): - # pylint: disable=maybe-no-member def setUp(self): # set up translation dictionary without calls to filesystem - setUp_config_singletons(paths = { - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + setUp_config_singletons(paths={ + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORK_DIR': 'D', 'OUTPUT_DIR': 'E' }) def tearDown(self): @@ -194,63 +197,61 @@ def tearDown(self): # ------------------------------------------------ def test_pathmgr_global(self): - paths = core.PathManager() + paths = util.ModelDataPathManager() self.assertEqual(paths.CODE_ROOT, 'A') self.assertEqual(paths.OUTPUT_DIR, 'E') @unittest.skip("") def test_pathmgr_global_asserterror(self): d = { - 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORK_DIR': 'D', 'OUTPUT_DIR': 'E' } - paths = core.PathManager() - self.assertRaises(AssertionError, paths.parse, d, list(d.keys())) - # initialize successfully so that tear_down doesn't break - #_ = core.PathManager(unittest = True) + paths = util.ModelDataPathManager(config) + self.assertRaises(AssertionError, paths, d, list(d.keys())) @unittest.skip("TODO: Test needs to be rewritten following v3 beta 3 release") -#@mock.patch.multiple(DataManager, __abstractmethods__=set()) class TestPathManagerPodCase(unittest.TestCase): def setUp(self): # set up translation dictionary without calls to filesystem setUp_config_singletons( config=self.case_dict, paths={ - 'CODE_ROOT':'A', 'OBS_DATA_ROOT':'B', 'MODEL_DATA_ROOT':'C', - 'WORKING_DIR':'D', 'OUTPUT_DIR':'E' + 'CODE_ROOT': 'A', 'OBS_DATA_ROOT': 'B', 'MODEL_DATA_ROOT': 'C', + 'WORK_DIR': 'D', 'OUTPUT_DIR': 'E' }, - pods={ 'AA':{ - 'settings':{}, - 'varlist':[{'var_name': 'pr_var', 'freq':'mon'}] - } + pods={'AA': { + 'settings': {}, + 'varlist': [{'var_name': 'pr_var', 'freq': 'mon'}] + } }) case_dict = { - 'CASENAME': 'A', 'model': 'B', 'FIRSTYR': 1900, 'LASTYR': 2100, + 'CASENAME': 'A', 'model': 'B', 'startdate': 19000101, 'enddate': 21001231, 'pod_list': ['AA'] } def tearDown(self): tearDown_config_singletons() - def test_pathmgr_model(self): - paths = core.PathManager() - case = DataManager(self.case_dict) - d = paths.model_paths(case) - self.assertEqual(d['MODEL_DATA_DIR'], 'TEST_MODEL_DATA_ROOT/A') - self.assertEqual(d['MODEL_WK_DIR'], 'TEST_WORKING_DIR/MDTF_A_1900_2100') - - def test_pathmgr_pod(self): - paths = core.PathManager() - case = DataManager(self.case_dict) - pod = diagnostic.Diagnostic('AA') - d = paths.pod_paths(pod, case) - 
self.assertEqual(d['POD_CODE_DIR'], 'TEST_CODE_ROOT/diagnostics/AA') - self.assertEqual(d['POD_OBS_DATA'], 'TEST_OBS_DATA_ROOT/AA') - self.assertEqual(d['POD_WK_DIR'], 'TEST_WORKING_DIR/MDTF_A_1900_2100/AA') + def test_pathmgr(self): + model_paths = util.ModelDataPathManager(config) + self.assertEqual(model_paths['MODEL_DATA_DIR'], 'TEST_MODEL_DATA_ROOT/A') + self.assertEqual(model_paths['MODEL_WORK_DIR'], 'TEST_WORK_DIR/MDTF_A_1900_2100') + # set up the case data source dictionary + cases = dict() + for case_name, case_d in self.case_dict.items(): + # instantiate the data_source class instance for the specified convention + cases[case_name] = data_sources.data_source[case_d.convention.upper() + "DataSource"](case_name, + case_d, + model_paths, + parent=None) + pod = pod_setup.PodObject('AA', config) + self.assertEqual(pod.paths['POD_CODE_DIR'], 'TEST_CODE_ROOT/diagnostics/AA') + self.assertEqual(pod.paths['POD_OBS_DATA'], 'TEST_OBS_DATA_ROOT/AA') + self.assertEqual(pod.paths['POD_WORK_DIR'], 'TEST_WORK_DIR/MDTF_A_1900_2100/AA') # --------------------------------------------------- diff --git a/src/tests/test_units.py b/src/tests/test_units.py index 7673ddb63..697910152 100644 --- a/src/tests/test_units.py +++ b/src/tests/test_units.py @@ -1,18 +1,19 @@ import unittest -import unittest.mock as mock from src import units -from src.util import exceptions + # TODO: better tests + class TestUnitConversionFactor(unittest.TestCase): def test_conversion_factors(self): # assertAlmostEqual w/default precision for comparison of floating-point # values self.assertAlmostEqual(units.conversion_factor('inch', 'cm'), 2.54) - self.assertAlmostEqual(units.conversion_factor('cm', 'inch'), 1.0/2.54) + self.assertAlmostEqual(units.conversion_factor('cm', 'inch'), 1.0 / 2.54) self.assertAlmostEqual(units.conversion_factor((123, 'inch'), 'cm'), 123.0 * 2.54) + class TestRefTime(unittest.TestCase): def get_test_date_strings(self, unit=None, time=None): strs = ['2020-05-25', '2000-01-01', '0001-01-01', '0000-01-01'] @@ -24,8 +25,8 @@ def get_test_date_strings(self, unit=None, time=None): return [f"{unit} since {s} {time}" for s in strs] def get_test_reftimes(self, unit=None, time=None, calendar=None): - return [units.Units(s, calendar=calendar) \ - for s in self.get_test_date_strings(unit, time)] + return [units.Units(s, calendar=calendar) + for s in self.get_test_date_strings(unit, time)] def test_isreftime(self): self.assertFalse(units.Units('days').isreftime) @@ -40,12 +41,12 @@ def test_isreftime(self): with self.subTest(test_u=u): self.assertTrue(u.isreftime) - for u in self.get_test_reftimes(time= '12:34:56', calendar='noleap'): + for u in self.get_test_reftimes(time='12:34:56', calendar='noleap'): with self.subTest(test_u=u): self.assertTrue(u.isreftime) - for u in self.get_test_reftimes(unit='minutes', time= '12:34:56', - calendar='proleptic_gregorian'): + for u in self.get_test_reftimes(unit='minutes', time='12:34:56', + calendar='proleptic_gregorian'): with self.subTest(test_u=u): self.assertTrue(u.isreftime) @@ -65,16 +66,16 @@ def multi_compare_id(self, list_1, compare_method): # without has_year_zero=True. 
There is no way to pass this # parameter to the cftime_dateparse method via this test,so # this hack will have to do for now - if '0000' in list_1[i].units.split('-')[0] or\ - '0000' in list_1[j].units.split('-')[0] or\ - 'days' not in list_1[i].units.split('-')[0] and\ + if '0000' in list_1[i].units.split('-')[0] or \ + '0000' in list_1[j].units.split('-')[0] or \ + 'days' not in list_1[i].units.split('-')[0] and \ 'days' not in list_1[j].units.split('-')[0]: # print(str(list_1[i].units.split('-')[0])) continue elif i == j: self.assertTrue( - getattr(list_1[i], compare_method)(list_1[j]) - ) + getattr(list_1[i], compare_method)(list_1[j]) + ) else: self.assertFalse( getattr(list_1[i], compare_method)(list_1[j]) @@ -83,10 +84,10 @@ def multi_compare_id(self, list_1, compare_method): def test_equal(self): # equal if they're equivalent and the unit conversion is the identity us = self.get_test_reftimes() \ - + self.get_test_reftimes(calendar='julian') \ - + self.get_test_reftimes(unit='minutes', time= '12:34:56', - calendar='proleptic_gregorian') \ - + [units.Units('days'), units.Units('minutes')] + + self.get_test_reftimes(calendar='julian') \ + + self.get_test_reftimes(unit='minutes', time='12:34:56', + calendar='proleptic_gregorian') \ + + [units.Units('days'), units.Units('minutes')] self.multi_compare_id(us, 'equals') @@ -102,22 +103,22 @@ def test_equivalent(self): us_2 = self.get_test_reftimes(calendar='julian') self.multi_compare(us, us_2, 'equivalent', False) - us_2 = self.get_test_reftimes(unit='minutes', time= '12:34:56', - calendar='proleptic_gregorian') + us_2 = self.get_test_reftimes(unit='minutes', time='12:34:56', + calendar='proleptic_gregorian') self.multi_compare(us, us_2, 'equivalent', False) def test_reftime_base_eq(self): # true if base units are equal us = self.get_test_reftimes() \ - + self.get_test_reftimes(calendar='julian') + + self.get_test_reftimes(calendar='julian') u_day = [units.Units('days')] self.multi_compare_id(us, 'reftime_base_eq') self.multi_compare(u_day, us, 'reftime_base_eq', True) self.multi_compare(us, u_day, 'reftime_base_eq', True) - us_2 = self.get_test_reftimes(unit='minutes', time= '12:34:56', - calendar='proleptic_gregorian') + us_2 = self.get_test_reftimes(unit='minutes', time='12:34:56', + calendar='proleptic_gregorian') u_min = [units.Units('minutes')] self.multi_compare_id(us_2, 'reftime_base_eq') @@ -128,5 +129,3 @@ def test_reftime_base_eq(self): self.multi_compare(u_min, us, 'reftime_base_eq', False) self.multi_compare(u_day, us_2, 'reftime_base_eq', False) self.multi_compare(us, us_2, 'reftime_base_eq', False) - - diff --git a/src/translation.py b/src/translation.py new file mode 100644 index 000000000..8ae3d074a --- /dev/null +++ b/src/translation.py @@ -0,0 +1,521 @@ +"""Utilities for variable translation +""" +import os +import collections +import copy +import dataclasses as dc +import glob +import typing +import pathlib +from src import util, data_model, units +from src.units import Units + +import logging + +_log = logging.getLogger(__name__) + +_NO_TRANSLATION_CONVENTION = 'no_translation' # naming convention for disabling translation + + +@util.mdtf_dataclass +class TranslatedVarlistEntry(data_model.DMVariable): + """Class returned by :meth:`VarlistTranslator.translate`. Marks some + attributes inherited from :class:`~data_model.DMVariable` as being queryable + in :meth:`~data_manager.DataframeQueryDataSourceBase.query_dataset`. 
+ """ + # to be more correct, we should probably have VarlistTranslator return a + # DMVariable, which is converted to this type on assignment to the + # VarlistEntry, since metadata fields are specific to the VarlistEntry + # implementation. + convention: str = util.MANDATORY + name: str = \ + dc.field(default=util.MANDATORY, metadata={'query': True}) + standard_name: str = \ + dc.field(default=util.MANDATORY, metadata={'query': True}) + units: Units = util.MANDATORY + # dims: list # fields inherited from data_model.DMVariable + # modifier : str + scalar_coords: list = \ + dc.field(init=False, default_factory=list, metadata={'query': True}) + log: typing.Any = util.MANDATORY # assigned from parent var + + +@util.mdtf_dataclass +class FieldlistEntry(data_model.DMDependentVariable): + """Class corresponding to an entry in a fieldlist file. + """ + # name: str # fields inherited from DMDependentVariable + # standard_name: str + # units: Units + # modifier : str + # dims: list # fields inherited from _DMDimensionsMixin + # scalar_coords: list + scalar_coord_templates: dict = dc.field(default_factory=dict) + + def __post_init__(self, coords=None): + super(FieldlistEntry, self).__post_init__(coords) + assert len(self.scalar_coords) == 0, 'FieldlistEntry scalar_coords attribute has nonzero length' + # if specified, verify that POD modifier attributes are valid + if not self.modifier.lower().strip() in (None, ''): + _str = VariableTranslator() + if self.modifier not in _str.modifier: + raise ValueError(f"Modifier {self.modifier} is not a recognized value.") + + _ndim_to_axes_set = { + # allow specifying dimensionality as shorthand for explicit list + # of coordinate dimension names + 1: 'PLACEHOLDER_T_COORD', + 2: ('PLACEHOLDER_Y_COORD', 'PLACEHOLDER_X_COORD'), + 3: ('PLACEHOLDER_T_COORD', 'PLACEHOLDER_Y_COORD', 'PLACEHOLDER_X_COORD'), + 4: ('PLACEHOLDER_T_COORD', 'PLACEHOLDER_Z_COORD', 'PLACEHOLDER_Y_COORD', + 'PLACEHOLDER_X_COORD') + } + _placeholder_class_dict = { + 'PLACEHOLDER_X_COORD': data_model.DMPlaceholderXCoordinate, + 'PLACEHOLDER_Y_COORD': data_model.DMPlaceholderYCoordinate, + 'PLACEHOLDER_Z_COORD': data_model.DMPlaceholderZCoordinate, + 'PLACEHOLDER_T_COORD': data_model.DMPlaceholderTCoordinate, + 'PLACEHOLDER_COORD': data_model.DMPlaceholderCoordinate + } + + @classmethod + def from_struct(cls, dims_d: dict, name: str, **kwargs): + # if we only have ndim, map to axes names + if 'dimensions' not in kwargs and 'ndim' in kwargs: + kwargs['dimensions'] = cls._ndim_to_axes_set[kwargs.pop('ndim')] + + # map dimension names to coordinate objects + kwargs['coords'] = [] + if 'dimensions' not in kwargs or not kwargs['dimensions']: + raise ValueError(f"No dimensions specified for fieldlist entry {name}.") + for d_name in kwargs.pop('dimensions'): + if d_name in cls._placeholder_class_dict: + coord_cls = cls._placeholder_class_dict[d_name] + kwargs['coords'].append(coord_cls()) + elif d_name not in dims_d: + raise ValueError((f"Unknown dimension name {d_name} in fieldlist " + f"entry for {name}.")) + else: + kwargs['coords'].append(dims_d[d_name]) + + for d_name in kwargs.get('scalar_coord_templates', dict()): + if d_name not in dims_d: + raise ValueError((f"Unknown dimension name {d_name} in scalar " + f"coord definition for fieldlist entry for {name}.")) + + filter_kw = util.filter_dataclass(kwargs, cls, init=True) + assert filter_kw['coords'] + cls.standard_name = kwargs['standard_name'] + if filter_kw.get('realm'): + cls.realm = filter_kw['realm'] + + return cls(name=name, **filter_kw) + 
+ def scalar_name(self, old_coord, new_coord, log=_log): + """Uses one of the scalar_coord_templates to construct the translated + variable name for this variable on a scalar coordinate slice (eg. + pressure level). + """ + c = old_coord + assert c.is_scalar + key = new_coord.name + if key not in self.scalar_coord_templates: + raise ValueError((f"Don't know how to name {c.name} ({c.axis}) slice " + f"of {self.name}." + )) + # construct convention's name for this variable on a level + name_template = self.scalar_coord_templates[key] + new_name = name_template.format(value=int(new_coord.value)) + if units.units_equal(c.units, new_coord.units): + log.debug("Renaming %s %s %s slice of '%s' to '%s'.", + c.value, c.units, c.axis, self.name, new_name) + else: + log.debug("Renaming %s slice of '%s' to '%s' (@ %s %s = %s %s).", + c.axis, self.name, new_name, c.value, c.units, + new_coord.value, new_coord.units + ) + return new_name + + def scalar_coords(self): + pass + + def dims(self): + pass + + +@util.mdtf_dataclass +class Fieldlist: + """Class corresponding to a single variable naming convention (single file + in data/fieldlist_*.jsonc). + + TODO: implement more robust indexing/lookup scheme. standard_name is not + a unique identifier, but should include cell_methods, etc. as well as + dimensionality. + """ + name: str = util.MANDATORY + axes: util.WormDict = dc.field(default_factory=util.WormDict) + axes_lut: util.WormDict = dc.field(default_factory=util.WormDict) + entries: util.WormDict = dc.field(default_factory=util.WormDict) + lut: util.WormDict = dc.field(default_factory=util.WormDict) + env_vars: dict = dc.field(default_factory=dict) + + @classmethod + def from_struct(cls, d: dict, code_root: str, log=None): + def _process_coord(section_name: str, d: dict, temp_d: dict, code_root: str, log=None): + # build two-stage lookup table by axis type, then name + # The name is the key since standard_names are not necessarily unique ID's + # and coordinates may be assigned to variables in multiple realms + section_d = d.pop(section_name, dict()) + if '$ref' in section_d.keys(): + ref_file_query = pathlib.Path(code_root, 'data', section_d['$ref']) + ref_file_path = str(ref_file_query) + assert ".json" in ref_file_query.suffix, f"{ref_file_path} is not a json(c) file" + coord_file_entries = util.read_json(ref_file_path, log=log) + + regex_dict = util.RegexDict(coord_file_entries) + section_d.update([r for r in regex_dict.get_matching_value('axis')][0]) + section_d.pop('$ref', None) + + for k, v in section_d.items(): + ax = v['axis'] + entry = data_model.coordinate_from_struct(v, name=k) + d['axes'][k] = entry + temp_d[ax][k] = entry.standard_name + return d, temp_d + + def _process_var(section_name, d, temp_d): + # build two-stage lookup table (by standard name, then data + # dimensionality) + section_d = d.pop(section_name, dict()) + for k, v in section_d.items(): + entry = FieldlistEntry.from_struct(d['axes'], name=k, **v) + d['entries'][k] = entry + # note that realm and modifier class atts are empty strings + # by default and, therefore, so are the corresponding dictionary + # keys. 
TODO: be sure to handle empty keys in PP + if not temp_d[entry.standard_name].get(entry.realm): + temp_d[entry.standard_name][entry.realm] = dict() + temp_d[entry.standard_name][entry.realm][entry.modifier] = entry + return d, temp_d + + temp_d = collections.defaultdict(util.WormDict) + d['axes'] = util.WormDict() + d['axes_lut'] = util.WormDict() + d, temp_d = _process_coord('coords', d, temp_d, code_root, log) + d['axes_lut'].update(temp_d) + + temp_d = collections.defaultdict(util.WormDict) + d['entries'] = util.WormDict() + d['lut'] = util.WormDict() + d, temp_d = _process_var('aux_coords', d, temp_d) + d, temp_d = _process_var('variables', d, temp_d) + d['lut'].update(temp_d) + return cls(**d) + + def to_CF(self, var_or_name): + """Returns :class:`FieldlistEntry` for the variable having the given + name in this convention. + """ + if hasattr(var_or_name, 'name'): + return self.entries[var_or_name.name] + else: + return self.entries[var_or_name] + + def to_CF_name(self, var_or_name): + """Like :meth:`to_CF`, but only return the CF standard name, given the + name in this convention. + """ + return self.to_CF(var_or_name).standard_name + + def from_CF(self, + var_or_name, + realm: str, + modifier=None, + num_dims: int = 0, + has_scalar_coords_att: bool = False, + name_only: bool = False): + """Look up :class:`FieldlistEntry` corresponding to the given standard + name, optionally providing a modifier to resolve ambiguity. + + TODO: this is a hacky implementation; FieldlistEntry needs to be + expanded with more ways to uniquely identify variable (eg cell methods). + Args: + var_or_name: variable or name of the variable + realm: variable realm (atmos, ocean, land, ice, etc...) + modifier:optional string to distinguish a 3-D field from a 4-D field with + the same var_or_name value + num_dims: number of dimensions of the POD variable corresponding to var_or_name + has_scalar_coords_att: boolean indicating that the POD variable has a scalar_coords + attribute, and therefore requires a level from a 4-D field + name_only: boolean indicating to not return a modifier--hacky way to accommodate + a from_CF_name call that does not provide other metadata + """ + if hasattr(var_or_name, 'standard_name'): + standard_name = var_or_name.standard_name + else: + standard_name = var_or_name + + if standard_name in self.lut: + lut1 = self.lut[standard_name][realm] # abbreviate + fl_entry: FieldlistEntry = None + empty_mod_count = 0 # counter for modifier attributes that are blank strings in the fieldlist lookup table + if not modifier: # empty strings and None types evaluate to False + entries = tuple(lut1.values()) + if len(entries) > 1: + for e in entries: + if not e.modifier.strip(): + empty_mod_count += 1 # fieldlist LUT entry has no modifier attribute + if has_scalar_coords_att or num_dims == len(e.dims) or name_only: + # fieldlist lut entry has a blank modifier + fl_entry = e + if empty_mod_count > 1: + raise ValueError((f"Variable name in convention '{self.name}' " + f"not uniquely determined by standard name '{standard_name}'.")) + else: + fl_entry = entries[0] + else: + if modifier not in lut1: + raise KeyError((f"Queried standard name '{standard_name}' with an " + f"unexpected modifier {modifier} not in convention " + f"'{self.name}'.")) + fl_entry = lut1[modifier] + + if not fl_entry: + raise ValueError("fl_entry evaluated as a None Type") + return copy.deepcopy(fl_entry) + raise KeyError((f"Standard name '{standard_name}' not defined in " + f"convention '{self.name}'.")) + + def from_CF_name(self, 
var_or_name: str, realm: str, modifier=None): + """Like :meth:`from_CF`, but only return the variable's name in this + convention. + + Args: + var_or_name: variable or name of the variable + realm: model realm of variable + modifier:optional string to distinguish a 3-D field from a 4-D field with + the same var_or_name value + """ + return self.from_CF(var_or_name, modifier=modifier, name_only=True, realm=realm).name + + def translate_coord(self, coord, log=_log): + """Given a :class:`~data_model.DMCoordinate`, look up the corresponding + translated :class:`~data_model.DMCoordinate` in this convention. + """ + ax = coord.axis + if ax not in self.axes_lut: + raise KeyError(f"Axis {ax} not defined in convention '{self.name}'.") + + lut1 = self.axes_lut[ax] + if not hasattr(coord, 'standard_name'): + coords = tuple(lut1.values()) + if len(coords) > 1: + raise ValueError((f"Coordinate dimension in convention '{self.name}' " + f"not uniquely determined by coordinate {coord.name}.")) + new_coord = coords[0] + else: + if coord.standard_name not in lut1.values(): + raise KeyError((f"Coordinate {coord.name} with standard name " + f"'{coord.standard_name}' not defined in convention '{self.name}'.")) + new_coord = [k for k in lut1.keys() if lut1[k] == coord.standard_name][0] + + if hasattr(coord, 'is_scalar') and coord.is_scalar: + new_coord = copy.deepcopy(new_coord) + new_coord.value = units.convert_scalar_coord(coord, new_coord.units, + log=log) + else: + new_coord = dc.replace(coord, + **(util.filter_dataclass(new_coord, coord))) + return new_coord + + def translate(self, var): + """Returns :class:`TranslatedVarlistEntry` instance, with populated + coordinate axes. Units of scalar coord slices are translated to the units + of the conventions' coordinates. Includes logic to translate and rename + scalar coords/slices, e.g. :class:`~varlist_util.VarlistEntry` for 'ua' + (intrinsically 4D) @ 500mb could produce a :class:`TranslatedVarlistEntry` + for 'u500' (3D slice), depending on naming convention. + """ + if var.use_exact_name: + # HACK; dataclass.asdict says VarlistEntry has no _id attribute & not sure why + fl_entry = {f.name: getattr(var, f.name, util.NOTSET) + for f in dc.fields(TranslatedVarlistEntry) if hasattr(var, f.name)} + new_name = var.name + else: + has_scalar_coords = bool(var.scalar_coords) + + fl_entry = self.from_CF(var.standard_name, + var.realm, + var.modifier, + var.dims.__len__(), + has_scalar_coords) + new_name = fl_entry.name + + new_dims = [self.translate_coord(dim, log=var.log) for dim in var.dims] + new_scalars = [self.translate_coord(dim, log=var.log) for dim in var.scalar_coords] + if len(new_scalars) > 1: + raise NotImplementedError() + elif len(new_scalars) == 1: + assert not var.use_exact_name, "assertion error: var.use_exact_name set to true for " + var.full_name + # change translated name to request the slice instead of the full var + # keep the scalar_coordinate value attribute on the translated var + new_name = fl_entry.scalar_name( + var.scalar_coords[0], new_scalars[0], log=var.log + ) + + return util.coerce_to_dataclass( + fl_entry, TranslatedVarlistEntry, + name=new_name, coords=(new_dims + new_scalars), + convention=self.name, log=var.log + ) + + +class NoTranslationFieldlist(metaclass=util.Singleton): + """Class which partially implements the :class:`Fieldlist` interface but + does no variable translation. :class:`~diagnostic.VarlistEntry` objects from + the POD are passed through to create :class:`TranslatedVarlistEntry` objects. 
+ """ + + def __init__(self): + # only a Singleton to ensure that we only log this message once + _log.info('Variable name translation disabled.') + + def to_CF(self, var_or_name): + # should never get here - not called externally + raise NotImplementedError + + def to_CF_name(self, var_or_name): + if hasattr(var_or_name, 'name'): + return var_or_name.name + else: + return var_or_name + + def from_CF(self, var_or_name, modifier=None): + # should never get here - not called externally + raise NotImplementedError + + def from_CF_name(self, var_or_name): + if hasattr(var_or_name, 'name'): + return var_or_name.name + else: + return var_or_name + + def translate_coord(self, coord, log=_log): + # should never get here - not called externally + raise NotImplementedError + + def translate(self, var): + """Returns :class:`TranslatedVarlistEntry` instance, populated with + contents of input :class:`~diagnostic.VarlistEntry` instance. + + .. note:: + We return a copy of the :class:`~diagnostic.VarlistEntry` because + logic in :class:`~xr_parser.DefaultDatasetParser` alters the translation + based on the file's actual contents. + """ + coords_copy = copy.deepcopy(var.dims) + copy.deepcopy(var.scalar_coords) + # TODO: coerce_to_dataclass runs into recursion limit on var; fix that + return TranslatedVarlistEntry( + name=var.name, + standard_name=var.standard_name, + units=var.units, + convention=_NO_TRANSLATION_CONVENTION, + coords=coords_copy, + modifier=var.modifier, + log=var.log + ) + + +class VariableTranslator(metaclass=util.Singleton): + """:class:`~util.Singleton` containing information for different variable + naming conventions. These are defined in the ``data/fieldlist_*.jsonc`` + files. + """ + + def __init__(self, code_root=None, unittest=False): + self._unittest = unittest + self.conventions = util.WormDict() + self.aliases = util.WormDict() + self.modifier = util.read_json(os.path.join(code_root, 'data', 'modifiers.jsonc'), log=_log) + + def add_convention(self, d: dict, file_path: str, log=None): + conv_name = d['name'].lower() + _log.debug("Adding variable name convention '%s'", conv_name) + for model in d.pop('models', []): + self.aliases[model] = conv_name + self.conventions[conv_name] = Fieldlist.from_struct(d, file_path, log=log) + + def read_conventions(self, code_root: str, unittest=False): + """ Read in the conventions from the Fieldlists and populate the convention attribute. """ + if unittest: + # value not used, when we're testing will mock out call to read_json + # below with actual translation table to use for test + config_files = [] + else: + glob_pattern = os.path.join( + code_root, 'data', 'fieldlist_*.jsonc' + ) + config_files = glob.glob(glob_pattern) + for f in config_files: + try: + d = util.read_json(f, log=_log) + self.add_convention(d, code_root, log=_log) + except Exception as exc: + _log.exception("Caught exception loading fieldlist file %s: %r", + f, exc) + continue + + def get_convention_name(self, conv_name: str): + """Resolve the naming convention associated with a given + :class:`Fieldlist` object from among a set of possible aliases. 
+ """ + if conv_name in self.conventions \ + or conv_name == _NO_TRANSLATION_CONVENTION: + return conv_name + if conv_name.upper() in self.aliases: + _log.debug("Using convention '%s' based on alias '%s'.", + self.aliases[conv_name], conv_name) + return self.aliases[conv_name] + _log.error("Unrecognized variable name convention '%s'.", + conv_name) + raise KeyError(conv_name) + + def get_convention(self, conv_name: str): + """Return the :class:`Fieldlist` object containing the variable name + translation logic for a given convention name. + """ + if conv_name == _NO_TRANSLATION_CONVENTION: + # hard-coded special case: do no translation + return NoTranslationFieldlist() + else: + # normal case: translate according to data source's naming convention + conv_name = self.get_convention_name(conv_name) + return self.conventions[conv_name] + + def _fieldlist_method(self, conv_name: str, method_name: str, *args, **kwargs): + """Wrapper which determines the requested convention and calls the + requested *method_name* on the :class:`Fieldlist` object for that + convention. + """ + meth = getattr(self.get_convention(conv_name), method_name) + return meth(*args, **kwargs) + + def to_CF(self, conv_name: str, name: str): + return self._fieldlist_method(conv_name, 'to_CF', name) + + def to_CF_name(self, conv_name: str, name: str): + return self._fieldlist_method(conv_name, 'to_CF_name', name) + + def from_CF(self, conv_name: str, standard_name: str, modifier=None): + return self._fieldlist_method(conv_name, 'from_CF', + standard_name, modifier=modifier) + + def from_CF_name(self, conv_name: str, standard_name: str, realm: str, modifier=None): + return self._fieldlist_method(conv_name, 'from_CF_name', + standard_name, realm, modifier=modifier) + + def translate_coord(self, conv_name: str, coord, log=_log): + return self._fieldlist_method(conv_name, 'translate_coord', coord, log=log) + + def translate(self, conv_name: str, var): + return self._fieldlist_method(conv_name, 'translate', var) diff --git a/src/units.py b/src/units.py index edcac371d..9cfb0532a 100644 --- a/src/units.py +++ b/src/units.py @@ -5,13 +5,16 @@ from src import util import logging + _log = logging.getLogger(__name__) + class Units(cfunits.Units): """Wrap `Units `__ class of `cfunits `__ to isolate this third-party dependency to the code in this module. """ + def reftime_base_eq(self, other): """Comparison function that recognizes reference time units (e.g., 'days since 1970-01-01') as being equal to unqualified time units @@ -26,6 +29,7 @@ def reftime_base_eq(self, other): other_2 = (cls(other._units_since_reftime) if other.isreftime else other) return self_2.equals(other_2) + def to_cfunits(*args): """Coerce string-valued units and (quantity, unit) tuples to :class:`Units` objects. @@ -33,6 +37,7 @@ def to_cfunits(*args): If more than one such argument is given in *args*, return a list containing the results of coercing each argument. """ + def _coerce(u): if isinstance(u, tuple): # (quantity, unit) tuple @@ -47,30 +52,33 @@ def _coerce(u): else: return [_coerce(arg) for arg in args] + def to_equivalent_units(*args): """Same as :func:`to_cfunits`, but raises TypeError if units of all quantities in *args* are not equivalent after coercion. 
""" args = to_cfunits(*args) - ref_unit = args.pop() # last entry in list + ref_unit = args.pop() # last entry in list for unit in args: if not ref_unit.equivalent(unit): raise TypeError((f"Units {repr(ref_unit)} and " - f"{repr(unit)} are inequivalent.")) + f"{repr(unit)} are inequivalent.")) args.append(ref_unit) return args + def relative_tol(x, y): """HACK to return ``max(|x-y|/x, |x-y|/y)`` for unit-ful quantities *x*, *y* with equivalent units. Vulnerable to underflow in principle. *x* and *y* are coerced to :class:`Units` objects via :func:`to_cfunits`. """ - x, y = to_equivalent_units(x,y) - tol_1 = Units.conform(1.0, x, y) # = float(x/y) - tol_2 = Units.conform(1.0, y, x) # = float(y/x) + x, y = to_equivalent_units(x, y) + tol_1 = Units.conform(1.0, x, y) # = float(x/y) + tol_2 = Units.conform(1.0, y, x) # = float(y/x) return max(abs(tol_1 - 1.0), abs(tol_2 - 1.0)) + def units_equivalent(*args): """Returns True if and only if all unit-ful quantities in *args* are physically equivalent: they represent the same physical quantity, up to a multiplicative @@ -82,6 +90,7 @@ def units_equivalent(*args): ref_unit = args.pop() return all(ref_unit.equivalent(unit) for unit in args) + def units_reftime_base_eq(*args): """Returns True if and only if all unit-ful quantities in *args* are physically equivalent: they represent the same physical quantity, up to a multiplicative @@ -93,6 +102,7 @@ def units_reftime_base_eq(*args): ref_unit = args.pop() return all(ref_unit.reftime_base_eq(unit) for unit in args) + def units_equal(*args, rtol=None): """Returns True if and only if all unit-ful quantities in *args* are strictly equal (:func:`units_equivalent` is True and :func:`conversion_factor` = 1). @@ -112,11 +122,12 @@ def units_equal(*args, rtol=None): for unit in args: try: if not (relative_tol(ref_unit, unit) <= rtol): - return False # outside tolerance + return False # outside tolerance except TypeError: - return False # inequivalent units + return False # inequivalent units return True + def conversion_factor(source_unit, dest_unit): """Return floating point factor which implements a given unit conversion. 
Defined so that (conversion factor) * (quantity in *source_units*) = @@ -128,6 +139,7 @@ def conversion_factor(source_unit, dest_unit): source_unit, dest_unit = to_equivalent_units(source_unit, dest_unit) return Units.conform(1.0, source_unit, dest_unit) + # -------------------------------------------------------------------- def convert_scalar_coord(coord, dest_units, log=_log): @@ -139,18 +151,18 @@ def convert_scalar_coord(coord, dest_units, log=_log): # convert units of scalar value to convention's coordinate's units dest_value = coord.value * conversion_factor(coord.units, dest_units) log.debug("Converted %s %s %s slice of '%s' to %s %s.", - coord.value, coord.units, coord.axis, coord.name, - dest_value, dest_units, - tags=util.ObjectLogTag.NC_HISTORY - ) + coord.value, coord.units, coord.axis, coord.name, + dest_value, dest_units + ) else: # identical units log.debug("Copied value of %s slice (=%s %s) of '%s' (identical units).", - coord.axis, coord.value, coord.units, coord.name - ) + coord.axis, coord.value, coord.units, coord.name + ) dest_value = coord.value return dest_value + def convert_dataarray(ds, da_name, src_unit=None, dest_unit=None, log=_log): """Wrapper for cfunits `conform() `__ @@ -182,10 +194,10 @@ def convert_dataarray(ds, da_name, src_unit=None, dest_unit=None, log=_log): src_unit = da.attrs['units'] except KeyError: raise TypeError((f"convert_dataarray: 'units' attribute not defined " - f"on {da.name}.")) + f"on {da.name}.")) if dest_unit is None: raise TypeError((f"convert_dataarray: dest_unit not given for unit " - "conversion on {da.name}.")) + "conversion on {da.name}.")) if 'standard_name' in da.attrs: std_name = f" ({da.attrs['standard_name']})" @@ -193,13 +205,12 @@ def convert_dataarray(ds, da_name, src_unit=None, dest_unit=None, log=_log): std_name = "" if units_equal(src_unit, dest_unit): log.debug(("Source, dest units of '%s'%s identical (%s); no conversion " - "done."), da.name, std_name, dest_unit) + "done."), da.name, std_name, dest_unit) return ds log.debug("Convert units of '%s'%s from '%s' to '%s'.", - da.name, std_name, src_unit, dest_unit, - tags=util.ObjectLogTag.NC_HISTORY - ) + da.name, std_name, src_unit, dest_unit + ) da_attrs = da.attrs.copy() fac = conversion_factor(src_unit, dest_unit) ds = ds.assign({da_name: fac * ds[da_name]}) diff --git a/src/util/__init__.py b/src/util/__init__.py index fdcade537..c1a34a164 100644 --- a/src/util/__init__.py +++ b/src/util/__init__.py @@ -1,36 +1,47 @@ # List public symbols for package import. 
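Usage sketch for the src/units.py conversion helpers above, before the src/util/__init__.py changes; it assumes cfunits accepts 'inch', 'cm', 'hPa', and 'Pa' as unit strings (the inch/cm factor matches the tests in test_units.py):

import math
from src import units

# conversion_factor is defined so that
# (factor) * (quantity in source units) = (quantity in dest units)
assert math.isclose(units.conversion_factor('inch', 'cm'), 2.54)
# (quantity, unit) tuples are coerced by to_cfunits()
assert math.isclose(units.conversion_factor((123, 'inch'), 'cm'), 123.0 * 2.54)
# equivalent = same physical quantity; equal additionally requires factor == 1
assert units.units_equivalent('hPa', 'Pa')
assert not units.units_equal('hPa', 'Pa')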
+from .exceptions import * from .basic import ( Singleton, abstract_attribute, MDTFABCMeta, MultiMap, WormDict, - ConsistentDict, WormDefaultDict, NameSpace, MDTFEnum, MDTFIntEnum, - sentinel_object_factory, MDTF_ID, - is_iterable, to_iter, from_iter, remove_prefix, - remove_suffix, filter_kwargs, splice_into_list, deserialize_class + ConsistentDict, WormDefaultDict, NameSpace, MDTFEnum, + sentinel_object_factory, MDTF_ID, deactivate, ObjectStatus, + is_iterable, to_iter, from_iter, remove_prefix, RegexDict, + remove_suffix, filter_kwargs, splice_into_list ) + +from .logs import ( + OBJ_LOG_ROOT, ObjectLogTag, MDTFObjectLogger, MDTFObjectLoggerMixin, + VarlistEntryLoggerMixin, PODLoggerMixin, CaseLoggerMixin, + signal_logger, git_info, mdtf_log_header, transfer_log_cache, + MDTFObjectBase +) + from .dataclass import ( RegexPatternBase, RegexPattern, RegexPatternWithTemplate, ChainedRegexPattern, - NOTSET, MANDATORY, mdtf_dataclass, regex_dataclass, dataclass_factory, - filter_dataclass, coerce_to_dataclass + NOTSET, MANDATORY, mdtf_dataclass, regex_dataclass, + filter_dataclass, coerce_to_dataclass, ClassMaker ) from .datelabel import ( DatePrecision, DateRange, Date, DateFrequency, FXDateMin, FXDateMax, FXDateRange, FXDateFrequency, AbstractDateRange, AbstractDate, AbstractDateFrequency ) -from .exceptions import * + from .filesystem import ( - abbreviate_path, resolve_path, recursive_copy, - check_executable, find_files, check_dir, bump_version, strip_comments, - parse_json, read_json, find_json, write_json, pretty_print_json, - append_html_template - # is_subpath, -) -from .logs import ( - OBJ_LOG_ROOT, ObjectLogTag, MDTFObjectLogger, MDTFObjectLoggerMixin, - VarlistEntryLoggerMixin, PODLoggerMixin, CaseLoggerMixin, - signal_logger, git_info, mdtf_log_header, transfer_log_cache + abbreviate_path, resolve_path, recursive_copy, _DoubleBraceTemplate, + check_executable, find_files, check_dir, bump_version, + append_html_template, TempDirManager ) + +from .json_utils import * + from .processes import ( ExceptionPropagatingThread, poll_command, run_command, run_shell_command ) + +from .path_utils import ( + PodPathManager, ModelDataPathManager +) + +from .catalog import * diff --git a/src/util/basic.py b/src/util/basic.py index 3589455e7..a79b3b615 100644 --- a/src/util/basic.py +++ b/src/util/basic.py @@ -6,9 +6,11 @@ import enum import itertools import string +import re +import textwrap import unittest.mock import uuid -from . import exceptions +from . import exceptions, PropagatedEvent import logging @@ -69,25 +71,19 @@ def __call__(cls, *args, **kwargs): return instance -class _Singleton(type): - """Private metaclass that creates a :class:`~util.Singleton` base class when - called. This version is taken from ``__ - and is compatible with Python 2 and 3. - """ +class Singleton(type): + # Updated definition to use solution #3 since Python 2 is no longer supported + # https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python/6798042 + # example implementation: + # class MyClass(BaseClass, metaclass=Singleton): + # pass _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: - cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs) + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] - -class Singleton(_Singleton('SingletonMeta', (object,), {})): - """Parent class defining the - `Singleton `_ pattern. We - use this as safer way to pass around global state. 
- """ - @classmethod def _reset(cls): """Private method of all :class:`~util.Singleton`-derived classes added @@ -319,7 +315,7 @@ def __setstate__(self, state): self.clear() self.update(state) - def toDict(self): + def toDict(self) -> dict: """Recursively converts a NameSpace back into a dictionary. """ return type(self)._toDict(self) @@ -375,24 +371,33 @@ def _freeze(self): def __eq__(self, other): if type(other) is type(self): - return (self._freeze() == other._freeze()) + return self._freeze() == other._freeze() else: return False def __ne__(self, other): - return (not self.__eq__(other)) # more foolproof + return not self.__eq__(other) # more foolproof def __hash__(self): return hash(self._freeze()) -class _MDTFEnumMixin(): +class _MDTFEnumMixin: + __members__ = None + name = None + + def __init__(self, *args): + self.name = None + def __str__(self): return str(self.name).lower() def __repr__(self): return '<%s.%s>' % (self.__class__.__name__, self.name) + def __getitem__(self, key): + return getattr(self, key) + @classmethod def from_struct(cls, str_): """Instantiate from string.""" @@ -420,10 +425,21 @@ def __new__(cls, *args, **kwargs): return obj -class MDTFIntEnum(_MDTFEnumMixin, enum.IntEnum): - """Customize :py:class:`~enum.IntEnum` analogous to :class:`MDTFEnum`. - """ - pass +ObjectStatus = MDTFEnum( + 'ObjectStatus', + 'NOTSET ACTIVE INACTIVE FAILED SUCCEEDED', + module=__name__ +) +ObjectStatus.__doc__ = """ +:class:`util.MDTFEnum` used to track the status of an object hierarchy object: +- *NOTSET*: the object hasn't been fully initialized. +- *ACTIVE*: the object is currently being processed by the framework. +- *INACTIVE*: the object has been initialized, but isn't being processed (e.g., + alternate :class:`~diagnostic.VarlistEntry`\s). +- *FAILED*: processing of the object has encountered an error, and no further + work will be done. +- *SUCCEEDED*: Processing finished successfully. +""" def sentinel_object_factory(obj_name): @@ -434,7 +450,7 @@ def sentinel_object_factory(obj_name): return getattr(unittest.mock.sentinel, obj_name) -class MDTF_ID(): +class MDTF_ID: """Class wrapping :py:class:`~uuid.UUID`, to provide unique ID numbers for members of the object hierarchy (cases, pods, variables, etc.), so that we don't need to require that objects in these classes have unique names. @@ -472,12 +488,12 @@ def __hash__(self): def __eq__(self, other): if hasattr(other, '_uuid'): - return (self._uuid == other._uuid) + return self._uuid == other._uuid else: return False def __ne__(self, other): - return (not self.__eq__(other)) # more foolproof + return not self.__eq__(other) # more foolproof # ------------------------------------------------------------------ @@ -608,36 +624,88 @@ def splice_into_list(list_, splice_d, key_fn=None, log=_log): return list(itertools.chain.from_iterable(spliced_chunks)) -def deserialize_class(name): - """Given the name of a currently defined class, return the class itself. - This avoids security issues with calling :py:func:`eval`. Based on - ``__. +def canonical_arg_name(str_): + """Convert a flag or other specification to a destination variable name. + The destination variable name always has underscores, never hyphens, in + accordance with PEP8. - Args: - name (str): name of the class to look up. + E.g., ``canonical_arg_name('--GNU-style-flag')`` returns "GNU_style_flag". + """ + return str_.lstrip('-').rstrip().replace('-', '_') - Returns: - :obj:`class` with the given name, if currently imported. 
-    Raises:
-        :py:class:`ValueError`: If class not found in current namespace.
+def plugin_key(plugin_name):
+    """Convert user input for plugin options to the string used to look up the
+    plugin value from options defined in cli_plugins.jsonc files.
+
+    Ignores spaces and underscores in supplied choices for CLI plugins, and
+    makes matching of plugin names case-insensitive.
     """
-    try:
-        # for performance, search python builtin types first before going
-        # through everything
-        return getattr(__builtins__, name)
-    except AttributeError:
-        # _log.debug('%s not found in builtin types.', name)
-        pass
-    q = collections.deque([object]) # everything inherits from object
-    while q:
-        t = q.popleft()
-        if t.__name__ == name:
-            return t
+    return re.sub(r"[\s_]+", "", plugin_name).lower()
+
+
+def word_wrap(str_):
+    """Clean whitespace and perform 80-column word wrapping for multi-line help
+    and description strings. Explicit paragraph breaks must be encoded as a
+    double newline \(``\\n\\n``\).
+    """
+    paragraphs = textwrap.dedent(str_).split('\n\n')
+    paragraphs = [re.sub(r'\s+', ' ', s).strip() for s in paragraphs]
+    paragraphs = [textwrap.fill(s, width=80) for s in paragraphs]
+    return '\n\n'.join(paragraphs)
+
+
+def iterdict(d):
+    """Iterate through a nested dictionary.
+    Return the key-value pair, and a level index
+    for the deepest level.
+    """
+    level = 0
+    for k, v in d.items():
+        if isinstance(v, dict) or isinstance(v, collections.OrderedDict):
+            iterdict(v)
+            level = level + 1
         else:
-            try: # keep looking
-                q.extend(t.__subclasses__())
-            except TypeError:
-                pass
-    if not type(t):
-        raise ValueError('No such type: %r' % name)
\ No newline at end of file
+            return k, v, level
+
+
+# level at which to log deactivation events
+_deactivation_log_level = logging.ERROR
+
+
+def deactivate(obj, exc, level=None):
+    """Deactivate an object and its dependencies, and set object status."""
+    # always log exceptions, even if we've already failed
+    obj.log.store_exception(exc)
+
+    if not (obj.failed or obj.status == ObjectStatus.SUCCEEDED):
+        # only need to log and update on status change for still-active objs
+        if level is None:
+            level = obj._deactivation_log_level  # default level for child class
+        obj.log.log(level, "Deactivated %s due to %r.", obj.full_name, exc)
+
+        # update status on self
+        obj.status = ObjectStatus.FAILED
+        if obj._parent is not None:
+            # call handler on parent, which may change parent and/or siblings
+            obj._parent.child_deactivation_handler(obj, exc)
+            obj._parent.child_status_update()
+        # update children (deactivate all); don't shadow obj, which is
+        # passed as the parent of the propagated event
+        for child in obj.iter_children(status_neq=ObjectStatus.FAILED):
+            child.deactivate(PropagatedEvent(exc=exc, parent=obj), level=None)
+
+
+class RegexDict(dict):
+    """Utilities to find dictionary entries using regular expressions.
+    Credit: https://stackoverflow.com/questions/21024822/python-accessing-dictionary-with-wildcards
+    """
+
+    def get_matching_value(self, query):
+        """Return the value corresponding to query"""
+        return (self[key] for key in self if re.search(r"(?P<name>\w+)", query))
+
+    def get_all_matching_values(self, queries: list):
+        """Return a generator of all matching values corresponding to each entry in a list of queries"""
+        return (match for query in queries for match in self.get_matching_value(query))
+
+
diff --git a/src/util/catalog.py b/src/util/catalog.py
new file mode 100644
index 000000000..a4c619fe1
--- /dev/null
+++ b/src/util/catalog.py
@@ -0,0 +1,178 @@
+""" Utilities for constructing ESM-intake catalogs for processed data
+    Source:
+
https://gitlab.dkrz.de/data-infrastructure-services/intake-esm/-/blob/master/builder/notebooks/dkrz_era5_disk_catalog.ipynb +""" +import fnmatch +import datetime +import dask +from intake.source.utils import reverse_format +import os +import re +import subprocess +from pathlib import Path +import itertools +import logging +from src import cli + +_log = logging.getLogger(__name__) + + +def _reverse_filename_format(file_basename, filename_template=None, gridspec_template=None): + """ + Uses intake's ``reverse_format`` utility to reverse the string method format. + Given format_string and resolved_string, find arguments + that would give format_string.format(arguments) == resolved_string + """ + try: + return reverse_format(filename_template, file_basename) + except ValueError: + try: + return reverse_format(gridspec_template, file_basename) + except Exception as exc: + print( + f'Failed to parse file: {file_basename} using patterns: {filename_template}: {exc}' + ) + return {} + + +def _extract_attr_with_regex(input_str: str, regex: str, strip_chars=None): + pattern = re.compile(regex, re.IGNORECASE) + match = re.findall(pattern, input_str) + if match: + match = max(match, key=len) + if isinstance(match, tuple): + match = ''.join(match) + if strip_chars: + match = match.strip(strip_chars) + return match + else: + return None + + +exclude_patterns = ['*/files/*', '*/latest/*'] + + +def _filter_func(path: str) -> bool: + return not any( + fnmatch.fnmatch(path, pat=exclude_pattern) for exclude_pattern in exclude_patterns + ) + + +def mdtf_pp_parser(file_path: str) -> dict: + """ Extract attributes of a file using information from MDTF OUTPUT DRS + """ + # get catalog in information from pp file name + freq_regex = r'/1hr/|/3hr/|/6hr/|/day/|/fx/|/mon/|/monClim/|/subhr/|/seas/|/yr/' + # YYYYMMDD:HHMMSS-YYYYMMDD:HHMMSS + # (([numbers in range 0-9 ]{repeat previous exactly 4 time}[numbers in range 0-1] + # [numbers in range 0-9][numbers in range 0-3][numbers in range 0-9]) + # (optional colon)(([numbers in range 0-2][numbers in range 0-3])([numbers in range 0-5][numbers in range 0-9]) + # {repeat previous exactly 2 times})*=0 or more of the HHMMSS group + # -(repeat the same regex for the second date string in the date range) + time_range_regex = r'([0-9]{4}[0-1][0-9][0-3][0-9])' \ + r'(:?)(([0-2][0-3])([0-5][0-9]){2})*' \ + r'(-)([0-9]{4}[0-1][0-9][0-3][0-9])' \ + r'(:?)(([0-2][0-3])([0-5][0-9]){2})*' + file_basename = os.path.basename(file_path) + + filename_template = ( + '{dataset_name}.{variable_id}.{frequency}.nc' + ) + + f = _reverse_filename_format(file_basename, filename_template=filename_template) + # ^..^ + # /o o\ + # oo--oo~~~ + cat_entry = dict() + cat_entry.update(f) + cat_entry['path'] = file_path + cat_entry['frequency'] = _extract_attr_with_regex(file_path, regex=freq_regex, strip_chars='/') + cat_entry['time_range'] = _extract_attr_with_regex(cat_entry['dataset_name'], regex=time_range_regex) + cat_entry['experiment_id'] = cat_entry['dataset_name'].split('_' + cat_entry['time_range'])[0] + + return cat_entry + + +def get_file_list(output_dir: str) -> list: + """Get a list of files in a directory""" + + cmd = ['find', output_dir, '-mindepth', '1', '-maxdepth', '5', '-type', "d"] + proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + output = proc.stdout.read().decode('utf-8').split() + dirs = [Path(entry) for entry in output] + + @dask.delayed + def _file_dir_files(directory): + try: + cmd = ['find', '-L', directory.as_posix(), '-name', '*.nc', '-type', 
"f"] + proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + output = proc.stdout.read().decode('utf-8').split() + except Exception as exc: + print(exc) + output = [] + return output + + print('Getting list of assets...\n') + filelist = [_file_dir_files(d) for d in dirs] + + filelist = dask.compute(*filelist) + + filelist = set(list(itertools.chain(*filelist))) + new_filelist = list(filelist) + return new_filelist + + +def define_pp_catalog_assets(config, cat_file_name: str) -> dict: + """ Define the version and attributes for the post-processed data catalog""" + cmip6_cv_info = cli.read_config_file(config.CODE_ROOT, + "data/cmip6-cmor-tables/Tables", + "CMIP6_CV.json") + + cat_dict = {'esmcat_version': datetime.datetime.today().strftime('%Y-%m-%d'), + 'description': 'Post-processed dataset for MDTF-diagnostics package', + 'attributes': [] + } + + for att in cmip6_cv_info['CV']['required_global_attributes']: + if att == 'Conventions': + att = "convention" + cat_dict["attributes"].append( + dict(column_name=att, + vocabulary=f"https://github.com/WCRP-CMIP/CMIP6_CVs/blob/master/" + f"CMIP6_required_global_attributes.json" + ) + ) + + cat_dict["assets"] = { + "column_name": "path", + "format": "netcdf" + } + cat_dict["aggregation_control"] = { + "variable_column_name": "variable_id", + "groupby_attrs": [ + "activity_id", + "institution_id", + "source_id", + "experiment_id", + "frequency", + "table_id", + "grid_label", + "realm", + "variant_label", + "time_range" + ], + "aggregations": [ + { + "type": "union", + "attribute_name": "variable_id", + "options": {} + }, + { + "type": "join_existing", + "attribute_name": "time_range", + "options": {"dim": "time", "coords": "minimal", "compat": "override"} + } + ] + } + + return cat_dict diff --git a/src/util/dataclass.py b/src/util/dataclass.py index 233372198..84f5a58fe 100644 --- a/src/util/dataclass.py +++ b/src/util/dataclass.py @@ -11,21 +11,57 @@ from . import exceptions import logging + _log = logging.getLogger(__name__) -class RegexPatternBase(): + +# The ClassMaker is cribbed from SO +# https://stackoverflow.com/questions/1176136/convert-string-to-python-class-object +# Classmaker and the @catalog_class.maker decorator allow class instantiation from +# strings. The main block can simply call the desired class using the convention +# argument instead of messy if/then/else blocks +# Instantiate the class maker with catalog_class = ClassMaker() + +class ClassMaker: + """ Class to instantiate other classes from strings""" + + def __init__(self): + self.classes = {} + + def add_class(self, c): + self.classes[c.__name__] = c + + # define the class decorator to return the class passed + def maker(self, c): + self.add_class(c) + return c + + def __getitem__(self, n): + return self.classes[n] + + +class RegexPatternBase: """Dummy parent class for :class:`RegexPattern` and :class:`ChainedRegexPattern`. """ pass + class RegexPattern(collections.UserDict, RegexPatternBase): """Wraps :py:class:`re.Pattern` with more convenience methods for the use case of parsing information in a string, using a regex with named capture groups corresponding to the data fields being collected from the string. """ + + data: dict + fields: frozenset + input_string: str = "" + input_field: str = "" + is_matched: bool = False + _defaults: dict + def __init__(self, regex, defaults=None, input_field=None, - match_error_filter=None): + match_error_filter=None): """Constructor. 
Args: @@ -53,6 +89,9 @@ def __init__(self, regex, defaults=None, input_field=None, is_matched (bool): True if the last call to :meth:`match` was successful, False otherwise. """ + self.data = dict() + self.input_string = "" + self.is_matched = False try: if isinstance(regex, re.Pattern): self.regex = regex @@ -86,7 +125,7 @@ def _update_fields(self): self.regex_fields = frozenset(self.regex.groupindex.keys()) self.fields = self.regex_fields.union(self._defaults.keys()) if self.input_field: - self.fields = self.fields.union((self.input_field, )) + self.fields = self.fields.union((self.input_field,)) self.clear() def update_defaults(self, d): @@ -122,7 +161,7 @@ def match(self, str_, *args): *match_error_filter* are not met. One of RegexParseError or RegexSuppressedError is always raised on failure. """ - self.clear() # to be safe + self.clear() # to be safe self.input_string = str_ m = self.regex.fullmatch(str_, *args) if not m: @@ -141,7 +180,7 @@ def match(self, str_, *args): f"Couldn't match {str_} against {self.regex}.") else: self.data = m.groupdict(default=NOTSET) - for k,v in self._defaults.items(): + for k, v in self._defaults.items(): if self.data.get(k, NOTSET) is NOTSET: self.data[k] = v if self.input_field: @@ -151,7 +190,7 @@ def match(self, str_, *args): if any(self.data[f] is NOTSET for f in self.fields): bad_names = [f for f in self.fields if self.data[f] is NOTSET] raise exceptions.RegexParseError((f"Couldn't match the " - f"following fields in {str_}: " + ', '.join(bad_names) )) + f"following fields in {str_}: " + ', '.join(bad_names))) self.is_matched = True def _validate_match(self, match_obj): @@ -164,7 +203,7 @@ def __str__(self): if not self.is_matched: str_ = ', '.join(self.fields) else: - str_ = ', '.join([f'{k}={v}' for k,v in self.data.items()]) + str_ = ', '.join([f'{k}={v}' for k, v in self.data.items()]) return f"<{self.__class__.__name__}({str_})>" def __copy__(self): @@ -192,11 +231,15 @@ def __deepcopy__(self, memo): obj.data = copy.deepcopy(self.data, memo) return obj + class RegexPatternWithTemplate(RegexPattern): """Adds formatted output to :class:`RegexPattern`. """ + + template: str = "" + def __init__(self, regex, defaults=None, input_field=None, - match_error_filter=None, template=None, log=_log): + match_error_filter=None, template=None, log=_log): """Constructor. Args: @@ -208,7 +251,8 @@ def __init__(self, regex, defaults=None, input_field=None, Other arguments are the same as in :class:`RegexPattern`. """ super(RegexPatternWithTemplate, self).__init__(regex, defaults=defaults, - input_field=input_field, match_error_filter=match_error_filter) + input_field=input_field, + match_error_filter=match_error_filter) self.template = template for f in self.fields: if f not in self.template: @@ -251,6 +295,7 @@ def __deepcopy__(self, memo): obj.data = copy.deepcopy(self.data, memo) return obj + class ChainedRegexPattern(RegexPatternBase): """Class which takes an 'or' of multiple :class:`RegexPattern`\s, to parse data that may be represented as a string in one of multiple formats. @@ -259,8 +304,9 @@ class ChainedRegexPattern(RegexPatternBase): one that succeeds determining the parsed field values. Public methods work the same as on :class:`RegexPattern`. """ + def __init__(self, *string_patterns, defaults=None, input_field=None, - match_error_filter=None): + match_error_filter=None): """Constructor. 
Args: @@ -277,9 +323,16 @@ def __init__(self, *string_patterns, defaults=None, input_field=None, Other arguments and attributes are the same as in :class:`RegexPattern`. """ + + input_string: str + _match: int + is_matched: bool = False + # NB, changes attributes on patterns passed as arguments, so # once created they can't be used on their own new_pats = [] + self.input_string = "" + self._match = -1 for pat in string_patterns: if isinstance(pat, RegexPattern): new_pats.append(pat) @@ -302,7 +355,7 @@ def __init__(self, *string_patterns, defaults=None, input_field=None, @property def is_matched(self): - return (self._match >= 0) + return self._match >= 0 @property def data(self): @@ -347,19 +400,19 @@ def match(self, str_, *args): self._match_error_filter.match(str_, *args) except Exception as exc: raise exceptions.RegexParseError((f"Couldn't match {str_} " - f"against any pattern in {self.__class__.__name__}.")) + f"against any pattern in {self.__class__.__name__}.")) raise exceptions.RegexSuppressedError(str_) elif self._match_error_filter: raise exceptions.RegexSuppressedError(str_) else: raise exceptions.RegexParseError((f"Couldn't match {str_} " - f"against any pattern in {self.__class__.__name__}.")) + f"against any pattern in {self.__class__.__name__}.")) def __str__(self): if not self.is_matched: str_ = ', '.join(self.fields) else: - str_ = ', '.join([f'{k}={v}' for k,v in self.data.items()]) + str_ = ', '.join([f'{k}={v}' for k, v in self.data.items()]) return f"<{self.__class__.__name__}({str_})>" def format(self): @@ -381,8 +434,10 @@ def __deepcopy__(self, memo): match_error_filter=copy.deepcopy(self._match_error_filter, memo) ) + # --------------------------------------------------------- + NOTSET = basic.sentinel_object_factory('NotSet') """ Sentinel object to detect uninitialized values for fields in :func:`mdtf_dataclass` @@ -398,7 +453,8 @@ def __deepcopy__(self, memo): we use the second solution described in ``__. """ -def _mdtf_dataclass_get_field_types(obj, f): + +def _mdtf_dataclass_get_field_types(obj, f, log): """Common functionality for :func:`_mdtf_dataclass_type_coercion` and :func:`_mdtf_dataclass_type_check`. 
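For orientation, the named-capture-group parsing that RegexPattern and ChainedRegexPattern wrap comes down to this standard-library idiom; the filename pattern below is illustrative, reusing field names from the catalog builder.

import re

# a filename regex with named capture groups, as RegexPattern expects
pat = re.compile(r"(?P<dataset_name>\w+)\.(?P<variable_id>\w+)\.(?P<frequency>\w+)\.nc")

m = pat.fullmatch("exp1.tas.mon.nc")
if m:
    print(m.groupdict())
    # {'dataset_name': 'exp1', 'variable_id': 'tas', 'frequency': 'mon'}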
Given a :py:class:`datacalsses.Field` object *f*, return either a tuple of the type its value should be coerced to @@ -407,34 +463,37 @@ def _mdtf_dataclass_get_field_types(obj, f): """ if not f.init: # ignore fields that aren't handled at init - return (None, None) + return None, None value = getattr(obj, f.name) # ignore unset field values, regardless of type if value is None or value is NOTSET: - return (None, None) + return None, None # guess what types are valid new_type = None if f.type is typing.Any or isinstance(f.type, typing.TypeVar): - return (None, None) + return None, None if dataclasses.is_dataclass(f.type): # ignore if type is a dataclass: use this type annotation to # implement dataclass inheritance if not isinstance(obj, f.type): raise exceptions.DataclassParseError((f"Field {f.name} specified " - f"as dataclass {f.type.__name__}, which isn't a parent class " - f"of {obj.__class__.__name__}.")) - return (None, None) + f"as dataclass {f.type.__name__}, which isn't a parent class " + f"of {obj.__class__.__name__}.")) + return None, None elif isinstance(f.type, typing._GenericAlias) \ - or isinstance(f.type, typing._SpecialForm): + or isinstance(f.type, typing._SpecialForm): # type is a generic from typing module, eg "typing.List" if f.type.__origin__ is typing.Union: - new_type = None # can't do coercion, but can test type + new_type = None # can't do coercion, but can test type valid_types = list(f.type.__args__) - elif issubclass(f.type.__origin__, typing.Generic): - return (None, None) # can't do anything in this case else: - new_type = f.type.__origin__ - valid_types = [new_type] + try: + new_type = f.type.__origin__ + valid_types = [new_type] + except Exception as exc: + log.debug(f"Caught exception when checking types for {f.type.__name__}", exc, + "Routine will return None") + return None, None # can't do anything in this case else: new_type = f.type valid_types = [new_type] @@ -444,7 +503,8 @@ def _mdtf_dataclass_get_field_types(obj, f): valid_types.append(type(f.default)) if not isinstance(f.default_factory, dataclasses._MISSING_TYPE): valid_types.append(type(f.default_factory())) - return (new_type, valid_types) + return new_type, valid_types + def _mdtf_dataclass_type_coercion(self, log): """Do type checking on all dataclass fields after the auto-generated @@ -457,12 +517,12 @@ def _mdtf_dataclass_type_coercion(self, log): """ for f in dataclasses.fields(self): value = getattr(self, f.name, NOTSET) - new_type, valid_types = _mdtf_dataclass_get_field_types(self, f) + new_type, valid_types = _mdtf_dataclass_get_field_types(self, f, log) try: if valid_types is None or isinstance(value, tuple(valid_types)): - continue # don't coerce if we're already a valid type + continue # don't coerce if we're already a valid type if new_type is None or hasattr(new_type, '__abstract_methods__'): - continue # can't do type coercion + continue # can't do type coercion else: if hasattr(new_type, 'from_struct'): new_value = new_type.from_struct(value) @@ -475,12 +535,13 @@ def _mdtf_dataclass_type_coercion(self, log): object.__setattr__(self, f.name, new_value) except (TypeError, ValueError, dataclasses.FrozenInstanceError) as exc: raise exceptions.DataclassParseError((f"{self.__class__.__name__}: " - f"Couldn't coerce value {repr(value)} for field {f.name} from " - f"type {type(value)} to type {new_type}.")) from exc + f"Couldn't coerce value {repr(value)} for field {f.name} from " + f"type {type(value)} to type {new_type}.")) from exc except Exception as exc: log.exception("%s: 
Caught exception: %r", self.__class__.__name__, exc) raise exc + def _mdtf_dataclass_type_check(self, log): """Do type checking on all dataclass fields after ``__init__`` and ``__post_init__`` methods. @@ -496,18 +557,20 @@ def _mdtf_dataclass_type_check(self, log): continue if value is MANDATORY: raise exceptions.DataclassParseError((f"{self.__class__.__name__}: " - f"No value supplied for mandatory field {f.name}.")) + f"No value supplied for mandatory field {f.name}.")) - _, valid_types = _mdtf_dataclass_get_field_types(self, f) + _, valid_types = _mdtf_dataclass_get_field_types(self, f, log) if valid_types is not None and not isinstance(value, tuple(valid_types)): log.exception("%s: Failed type check for field '%s': %s != %s.", - self.__class__.__name__, f.name, type(value), valid_types) + self.__class__.__name__, f.name, type(value), valid_types) raise exceptions.DataclassParseError((f"{self.__class__.__name__}: " - f"Expected {f.name} to be {f.type}, got {type(value)} " - f"({repr(value)}).")) + f"Expected {f.name} to be {f.type}, got {type(value)} " + f"({repr(value)}).")) + DEFAULT_MDTF_DATACLASS_KWARGS = {'init': True, 'repr': True, 'eq': True, - 'order': False, 'unsafe_hash': False, 'frozen': False} + 'order': False, 'unsafe_hash': False, 'frozen': False} + # declaration to allow calling with and without args: python cookbook 9.6 # https://github.com/dabeaz/python-cookbook/blob/master/src/9/defining_a_decorator_that_takes_an_optional_argument/example.py @@ -516,7 +579,7 @@ def _mdtf_dataclass_type_check(self, log): def mdtf_dataclass(cls=None, **deco_kwargs): """Wrap the Python :py:func:`~dataclasses.dataclass` class decorator to customize dataclasses to provide rudimentary type checking and conversion. This - is hacky, since dataclasses don't enforce type annontations for their fields. + is hacky, since dataclasses don't enforce type annotations for their fields. A better solution would be to use the third-party `cattrs `__ package, which has essentially the same aim. @@ -567,6 +630,7 @@ def mdtf_dataclass(cls=None, **deco_kwargs): # create dummy __post_init__ if none defined, so we can wrap it. # contrast with what we do below in regex_dataclass() def _dummy_post_init(self, *args, **kwargs): pass + type.__setattr__(cls, '__post_init__', _dummy_post_init) # apply dataclasses' decorator @@ -585,6 +649,7 @@ def _new_post_init(self, *args, **kwargs): _mdtf_dataclass_type_coercion(self, _post_init_log) _old_post_init(self, *args, **kwargs) _mdtf_dataclass_type_check(self, _post_init_log) + type.__setattr__(cls, '__post_init__', _new_post_init) return cls @@ -626,13 +691,14 @@ def _regex_dataclass_preprocess_kwargs(self, kwargs): new_kw.update(new_d) except exceptions.WormKeyError as exc: raise exceptions.DataclassParseError((f"{self.__class__.__name__}: " - f"Tried to make inconsistent field assignment when parsing " - f"{f.name} as an instance of {f.type.__name__}.")) from exc + f"Tried to make inconsistent field assignment when parsing " + f"{f.name} as an instance of {f.type.__name__}.")) from exc post_init = dict() for f in dataclasses.fields(self): if not f.init and f.name in new_kw: post_init[f.name] = new_kw.pop(f.name) - return (new_kw, post_init) + return new_kw, post_init + def regex_dataclass(pattern, **deco_kwargs): """Decorator combining the functionality of :class:`RegexPattern` and @@ -669,6 +735,7 @@ def _dataclass_decorator(cls): # hasattr().) 
__post_init__ of all parents will have been called when # the parent classes are instantiated by _regex_dataclass_preprocess_kwargs. def _dummy_post_init(self, *args, **kwargs): pass + type.__setattr__(cls, '__post_init__', _dummy_post_init) # apply dataclasses' decorator @@ -678,9 +745,10 @@ def _dummy_post_init(self, *args, **kwargs): pass for f in dataclasses.fields(cls): if is_regex_dataclass(f.type) and f.type not in cls.__mro__: raise TypeError((f"{cls.__name__}: Field {f.name} specified as " - f"{f.type.__name__}, but we don't inherit from it.")) + f"{f.type.__name__}, but we don't inherit from it.")) _old_init = cls.__init__ + @functools.wraps(_old_init) def _new_init(self, first_arg=None, *args, **kwargs): if isinstance(first_arg, str) and not args and not kwargs: @@ -690,7 +758,7 @@ def _new_init(self, first_arg=None, *args, **kwargs): first_arg = None kwargs = self._pattern.data new_kw, other_kw = _regex_dataclass_preprocess_kwargs(self, kwargs) - for k,v in other_kw.items(): + for k, v in other_kw.items(): # set field values that aren't arguments to _old_init object.__setattr__(self, k, v) if first_arg is None: @@ -700,6 +768,7 @@ def _new_init(self, first_arg=None, *args, **kwargs): _mdtf_dataclass_type_coercion(self, _log) _mdtf_dataclass_type_check(self, _log) + type.__setattr__(cls, '__init__', _new_init) def _from_string(cls_, str_, *args): @@ -707,58 +776,17 @@ def _from_string(cls_, str_, *args): Used by :func:`regex_dataclass` for parsing field values and automatic type coercion. """ + cls_._pattern.match(str_, *args) return cls_(**cls_._pattern.data) + type.__setattr__(cls, 'from_string', classmethod(_from_string)) type.__setattr__(cls, '_is_regex_dataclass', True) type.__setattr__(cls, '_pattern', pattern) return cls - return _dataclass_decorator - -def dataclass_factory(dataclass_decorator, class_name, *parents, **kwargs): - """Function that returns a dataclass (ie, a decorated class) whose fields - are the union of the fields in *parents*, which the new dataclass inherits - from. - - Args: - dataclass_decorator (function): decorator to apply to the new class. - class_name (str): name of the new class. - parents: collection of other mdtf_dataclasses to inherit from. Order in - the collection determines the MRO. - kwargs: Optional; arguments to pass to dataclass_decorator when it's - applied to produce the returned class. - """ - def _to_dataclass(self, cls_, **kwargs_): - f"""Method to create an instance of one of the parent classes of - {class_name} by copying over the relevant subset of fields. - """ - # above docstring gets templated - new_kwargs = filter_dataclass(self, cls_) - new_kwargs.update(kwargs_) - return cls_(**new_kwargs) - - def _from_dataclasses(cls_, *other_dcs, **kwargs_): - f"""Class method to create a new instance of {class_name} from instances - of its parents, along with any other field values passed in kwargs. 
- """ - # above docstring gets templated - new_kwargs = dict() - for dc in other_dcs: - new_kwargs.update(filter_dataclass(dc, cls_)) - new_kwargs.update(kwargs_) - return cls_(**new_kwargs) - - methods = { - 'to_dataclass': _to_dataclass, - 'from_dataclasses': classmethod(_from_dataclasses), - } - for dc in parents: - method_nm = 'to_' + dc.__name__ - methods[method_nm] = functools.partialmethod(_to_dataclass, cls_=dc) - new_cls = type(class_name, tuple(parents), methods) - return dataclass_decorator(new_cls, **kwargs) + return _dataclass_decorator def filter_dataclass(d, dc, init=False): @@ -788,13 +816,13 @@ def filter_dataclass(d, dc, init=False): assert dataclasses.is_dataclass(dc) if dataclasses.is_dataclass(d): if isinstance(d, type): - d = d() # d is a class; instantiate with default field values + d = d() # d is a class; instantiate with default field values d = dataclasses.asdict(d) if not init or (init == 'all'): ans = {f.name: d[f.name] for f in dataclasses.fields(dc) if f.name in d} else: - ans = {f.name: d[f.name] for f in dataclasses.fields(dc) \ - if (f.name in d and f.init)} + ans = {f.name: d[f.name] for f in dataclasses.fields(dc) + if (f.name in d and f.init)} if init or (init == 'all'): init_fields = filter( (lambda f: f.type == dataclasses.InitVar), diff --git a/src/util/datelabel.py b/src/util/datelabel.py index 560153a98..948824755 100644 --- a/src/util/datelabel.py +++ b/src/util/datelabel.py @@ -47,13 +47,16 @@ from src import util import logging + _log = logging.getLogger(__name__) + # =============================================================== # following adapted from Alexandre Decan's python-intervals # https://github.com/AlexandreDecan/python-intervals ; LGPLv3 # We neglect the case of noncontiguous or semi-infinite intervals here + class AtomicInterval(object): """ This class represents an atomic interval. @@ -122,9 +125,9 @@ def is_empty(self): True if interval is empty, False otherwise. 
""" return ( - self._lower > self._upper or - (self._lower == self._upper \ - and (self._left == self.OPEN or self._right == self.OPEN)) + self._lower > self._upper or + (self._lower == self._upper \ + and (self._left == self.OPEN or self._right == self.OPEN)) ) def replace(self, left=None, lower=None, upper=None, right=None, ignore_inf=True): @@ -183,7 +186,7 @@ def overlaps(self, other, adjacent=False): raise TypeError('Only AtomicInterval instances are supported.') if self._lower < other.lower or \ - (self._lower == other.lower and self._left == self.CLOSED): + (self._lower == other.lower and self._left == self.CLOSED): first, second = self, other else: first, second = other, self @@ -284,11 +287,11 @@ def __or__(self, other): def __contains__(self, item): if isinstance(item, AtomicInterval): left = item._lower > self._lower or ( - item._lower == self._lower \ + item._lower == self._lower and (item._left == self._left or self._left == self.CLOSED) ) right = item._upper < self._upper or ( - item._upper == self._upper and \ + item._upper == self._upper and (item._right == self._right or self._right == self.CLOSED) ) return left and right @@ -400,11 +403,12 @@ def contiguous_span(cls, *args): """ ints = sorted(args, key=op.attrgetter('lower')) for i in list(range(0, len(ints) - 1)): - if not ints[i].adjoins_left(ints[i+1]): + if not ints[i].adjoins_left(ints[i + 1]): raise ValueError(("Intervals {} and {} not contiguous and " - "nonoverlapping.").format(ints[i], ints[i+1])) + "nonoverlapping.").format(ints[i], ints[i + 1])) return AtomicInterval(ints[0].left, ints[0].lower, - ints[-1].upper, ints[-1].right) + ints[-1].upper, ints[-1].right) + # =============================================================== @@ -427,10 +431,13 @@ class DatePrecision(enum.IntEnum): HOUR = 4 MINUTE = 5 SECOND = 6 + MICROSECOND = 7 + class DateMixin(object): """Utility methods for dealing with dates. """ + @staticmethod def date_format(dt, precision=None): """Print date *dt* in YYYYMMDDHHMMSS format, with length being set @@ -447,7 +454,7 @@ def date_format(dt, precision=None): str_ = '{0.tm_year:04}{0.tm_mon:02}{0.tm_mday:02}'.format(tup_) str_ = str_ + '{0.tm_hour:02}{0.tm_min:02}{0.tm_sec:02}'.format(tup_) if precision: - return str_[:2*(precision + 1)] + return str_[:2 * (precision + 1)] else: return str_ @@ -456,7 +463,7 @@ def increment(cls, dt, precision): """Return a copy of *dt* advanced by one time unit as specified by the *precision* attribute. """ - if precision == DatePrecision.MONTH: # can't handle this with timedeltas + if precision == DatePrecision.MONTH: # can't handle this with timedeltas if dt.month == 12: return dt.replace(year=(dt.year + 1), month=1) else: @@ -469,7 +476,7 @@ def decrement(cls, dt, precision): """Return a copy of *dt* moved back by one time unit as specified by the *precision* attribute. 
""" - if precision == DatePrecision.MONTH: # can't handle this with timedeltas + if precision == DatePrecision.MONTH: # can't handle this with timedeltas if dt.month == 1: return dt.replace(year=(dt.year - 1), month=12) else: @@ -490,13 +497,15 @@ def _inc_dec_common(dt, precision, delta): # nb: can't handle this with timedeltas return dt.replace(year=(dt.year + delta)) elif precision == DatePrecision.DAY: - td = datetime.timedelta(days = delta) + td = datetime.timedelta(days=delta) elif precision == DatePrecision.HOUR: - td = datetime.timedelta(hours = delta) + td = datetime.timedelta(hours=delta) elif precision == DatePrecision.MINUTE: - td = datetime.timedelta(minutes = delta) + td = datetime.timedelta(minutes=delta) elif precision == DatePrecision.SECOND: - td = datetime.timedelta(seconds = delta) + td = datetime.timedelta(seconds=delta) + elif precision == DatePrecision.MICROSECOND: + td = datetime.timedelta(microseconds=delta) else: # prec == 2 case handled in calling logic raise ValueError(f"Malformed input: {repr(dt)} prec={precision} delta={delta}") @@ -526,17 +535,17 @@ def __init__(self, start, end=None, precision=None, log=_log): Args: start (str or datetime): Start date of the interval as a - :py:class:`~datetime.datetime` object, or string in YYYYMMDD... - or YYYY-MM-DD formats, or a two-item collection or string defining - *both* endpoints of the interval as strings in YYYYMMDD... format - separated by a single hyphen. + :py:class:`~datetime.datetime` object, or string in YYYYmmdd... + YYYY-MM-DD, or YYYY:MM:DD formats, or a two-item collection or string defining + *both* endpoints of the interval as strings in YYYYmmdd... format + separated by a single hyphen or colon. end (str or datetime): Optional. End date of the interval as a - :py:class:`~datetime.datetime` object, or string in YYYYMMDD... - or YYYY-MM-DD formats. Ignored if the entire range was specified + :py:class:`~datetime.datetime` object, or string in YYYYmmdd..., + YYYY-mm-dd, or YYYY:mm:dd formats. Ignored if the entire range was specified as a string in *start*. precision (int or :class:`DatePrecision`): Optional. Manually set precision of date endpoints defining the range. If not supplied, - set based on the length of the YYYYMMDD... strings supplied in + set based on the length of the YYYYmmdd... strings supplied in *start* and *end*. 
Raises: @@ -548,7 +557,7 @@ def __init__(self, start, end=None, precision=None, log=_log): """ if not end: if isinstance(start, str): - (start, end) = start.split(self._range_sep) + (start, end) = re.split('[-, :]', start) elif len(start) == 2: (start, end) = start else: @@ -558,7 +567,7 @@ def __init__(self, start, end=None, precision=None, log=_log): dt1, prec1 = self._coerce_to_datetime(end, is_lower=False) if not (dt0 < dt1): log.warning('Args to DateRange out of order (%s >= %s)', - start, end) + start, end) dt0, prec0 = self._coerce_to_datetime(end, is_lower=True) dt1, prec1 = self._coerce_to_datetime(start, is_lower=False) # call AtomicInterval's init @@ -568,9 +577,10 @@ def __init__(self, start, end=None, precision=None, log=_log): precision = DatePrecision(precision) if precision > prec0 or precision > prec1: raise util.MixedDatePrecisionException(( - "Attempted to init DateRange with manual prec {}, but date " - "arguments have precs {}, {}").format(precision, prec0, prec1) - ) + "Attempted to init DateRange with manual prec {}, but date " + "arguments have precs {}, {}").format(precision, prec0, + prec1) + ) self.precision = precision else: self.precision, _ = self._precision_check(prec0, prec1) @@ -593,13 +603,13 @@ def _precision_check(*args): warnings.warn('Expected precisions {} to be identical'.format( args )) - return (min_, max_) + return min_, max_ @staticmethod def _coerce_to_datetime(dt, is_lower): if isinstance(dt, datetime.datetime): # datetime specifies time to within second - return (dt, DatePrecision.SECOND) + return dt, DatePrecision.SECOND if isinstance(dt, datetime.date): # date specifies time to within day return ( @@ -609,9 +619,9 @@ def _coerce_to_datetime(dt, is_lower): else: tmp = Date._coerce_to_self(dt) if is_lower: - return (tmp.lower, tmp.precision) + return tmp.lower, tmp.precision else: - return (tmp.upper, tmp.precision) + return tmp.upper, tmp.precision @classmethod def _coerce_to_self(cls, item, precision=None): @@ -628,7 +638,7 @@ def _coerce_to_self(cls, item, precision=None): return cls(item) except Exception: raise TypeError((f"Comparison not supported between {cls.__name__} " - f"and {type(item).__name__} ({repr(item)}).")) + f"and {type(item).__name__} ({repr(item)}).")) @property def start_datetime(self): @@ -668,7 +678,7 @@ def end(self): @classmethod def from_contiguous_span(cls, *args): """Given multiple DateRanges, return interval containing them - only if their time intervals are continguous and nonoverlapping. + only if their time intervals are contiguous and non-overlapping. 
""" if len(args) == 1 and isinstance(args[0], DateRange): return args[0] @@ -698,6 +708,7 @@ def format(self, precision=None): # need to decrement upper bound because interval is open there return self.date_format(self.lower, precision) + self._range_sep \ + self.date_format(self.decrement(self.upper, precision), precision) + __str__ = format def __repr__(self): @@ -716,12 +727,13 @@ def __contains__(self, item): """ item = self._coerce_to_self(item) left_gt = item._lower > self._lower - left_eq = self.start.overlaps(item.start) \ - and (item._left == self._left or self._left == self.CLOSED) + left_eq = (self.start.overlaps(item.start) + and (item._left == self._left or self._left == self.CLOSED)) right_lt = item._upper < self._upper - right_eq = self.end.overlaps(item.end) \ - and (item._right == self._right or self._right == self.CLOSED) + right_eq = (self.end.overlaps(item.end) + and (item._right == self._right or self._right == self.CLOSED)) return (left_gt or left_eq) and (right_lt or right_eq) + contains = __contains__ def overlaps(self, item): @@ -747,12 +759,16 @@ def _date_range_compare_common(self, other, func_name): def __lt__(self, other): return self._date_range_compare_common(other, '__lt__') + def __le__(self, other): return self._date_range_compare_common(other, '__le__') + def __gt__(self, other): return self._date_range_compare_common(other, '__gt__') + def __ge__(self, other): return self._date_range_compare_common(other, '__ge__') + def __eq__(self, other): # Don't want check for static date in this case try: @@ -766,6 +782,7 @@ def __eq__(self, other): def __hash__(self): return hash((self.__class__, self.lower, self.upper, self.precision)) + class Date(DateRange): """Defines a single date with variable level precision. @@ -787,7 +804,7 @@ class Date(DateRange): of DatePrecision.YEAR. """ - _datetime_attrs = ('year','month','day','hour','minute','second') + _datetime_attrs = ('year', 'month', 'day', 'hour', 'minute', 'second') def __init__(self, *args, **kwargs): """Constructor. @@ -820,13 +837,13 @@ def __init__(self, *args, **kwargs): if prec is not None and not isinstance(prec, DatePrecision): prec = DatePrecision(prec) - assert prec <= 6 # other values not supported + assert prec <= 6 # other values not supported for i in list(range(prec)): setattr(self, self._datetime_attrs[i], dt_args[i]) if prec == 1: - dt_args = (dt_args[0], 1, 1) # missing month & day + dt_args = (dt_args[0], 1, 1) # missing month & day elif prec == 2: - dt_args = (dt_args[0], dt_args[1], 1) # missing day + dt_args = (dt_args[0], dt_args[1], 1) # missing day dt = datetime.datetime(*dt_args) # call DateRange's init @@ -847,13 +864,14 @@ def _parse_datetime(cls, dt): @classmethod def _parse_input_string(cls, s): - """Parse date strings in `YYYY-MM-DD` or `YYYYMMDDHH` formats. + """Parse date strings in `YYYY-MM-DD:HH:MM:SS` or `YYYYMMDDHHMMSS` formats. 
""" - if '-' in s: - return tuple([int(ss) for ss in s.split('-')]) + + if '-' in s or ':' in s: + return tuple([int(ss) for ss in re.split('[-:s+]', s)]) ans = [int(s[0:4])] for i in list(range(4, len(s), 2)): - ans.append(int(s[i:(i+2)])) + ans.append(int(s[i:(i + 2)])) return tuple(ans) def format(self, precision=None): @@ -865,6 +883,7 @@ def format(self, precision=None): return self.date_format(self.lower, precision) else: return self.date_format(self.lower, self.precision) + __str__ = format def __repr__(self): @@ -883,7 +902,7 @@ def _tuple_compare(self, other, func): if self.is_static or getattr(other, 'is_static', False): if func == op.eq: # True only if both values are FXDates - return (self.is_static and getattr(other, 'is_static', False)) + return self.is_static and getattr(other, 'is_static', False) else: raise util.FXDateException(func_name='_tuple_compare') if not isinstance(other, self.__class__): @@ -917,15 +936,17 @@ def __eq__(self, other): return False def __ne__(self, other): - return (not self.__eq__(other)) # more foolproof + return not self.__eq__(other) # more foolproof def __hash__(self): return hash((self.__class__, self.lower, self.upper, self.precision)) + class _StaticTimeDependenceBase(object): """Dummy class to label sentinel objects for use in describing static data with no time dependence. """ + @property def is_static(self): """Property indicating time-independent data (eg, 'fx' in CMIP6 DRS.) @@ -939,6 +960,7 @@ def _coerce_to_self(cls, item): def format(self, precision=None): return "" + isoformat = format __str__ = format @@ -946,6 +968,7 @@ def format(self, precision=None): def date_format(dt, precision=None): return "" + class _FXDateMin(_StaticTimeDependenceBase, Date): def __init__(self): # call DateRange's init @@ -964,8 +987,11 @@ def start(self): @property def end(self): return self.lower + + FXDateMin = _FXDateMin() + class _FXDateMax(_StaticTimeDependenceBase, Date): def __init__(self): # call DateRange's init @@ -984,12 +1010,16 @@ def start(self): @property def end(self): return self.upper + + FXDateMax = _FXDateMax() + class _FXDateRange(_StaticTimeDependenceBase, DateRange): """Singleton placeholder/sentinel object for use in describing static data with no time dependence. """ + def __init__(self): # call DateRange's init super(_FXDateRange, self).__init__(datetime.datetime.min, datetime.datetime.max) @@ -1005,11 +1035,14 @@ def start(self): @property def end(self): return FXDateMax + + FXDateRange = _FXDateRange() """Singleton placeholder/sentinel object for use in describing static data with no time dependence. """ + class DateFrequency(datetime.timedelta): """Class representing a frequency or time period. @@ -1020,6 +1053,7 @@ class DateFrequency(datetime.timedelta): as is possible for :py:class:`~datetime.timedelta` and :py:class:`~datetime.datetime`. """ + # define __new__, not __init__, because timedelta is immutable def __new__(cls, quantity, unit=None): if isinstance(quantity, str) and (unit is None): @@ -1040,7 +1074,7 @@ def __new__(cls, quantity, unit=None): def is_static(self): """Property indicating time-independent data (e.g., ``fx`` in CMIP6 DRS.) 
""" - return (self.quantity == 0 and self.unit == "fx") + return self.quantity == 0 and self.unit == "fx" @classmethod def from_struct(cls, str_): @@ -1081,7 +1115,7 @@ def _parse_input_string(cls, quantity, unit): s = 'min' else: raise ValueError("Malformed input {} {}".format(quantity, unit)) - return (cls._get_timedelta_kwargs(q, s), {'quantity': q, 'unit': s}) + return cls._get_timedelta_kwargs(q, s), {'quantity': q, 'unit': s} @classmethod def _get_timedelta_kwargs(cls, q, s): @@ -1089,8 +1123,8 @@ def _get_timedelta_kwargs(cls, q, s): # internally set to maximum representable timedelta, for purposes of comparison tmp = datetime.timedelta.max return {'days': tmp.days, 'seconds': tmp.seconds, - 'microseconds': tmp.microseconds - } + 'microseconds': tmp.microseconds + } elif s == 'yr': return {'days': 365 * q} elif s == 'season': @@ -1115,6 +1149,7 @@ def format(self): return 'fx' else: return "{}{}".format(self.quantity, self.unit) + __str__ = format def format_local(self): @@ -1142,23 +1177,25 @@ def __eq__(self, other): return super(DateFrequency, self).__eq__(other) def __ne__(self, other): - return (not self.__eq__(other)) # more foolproof + return not self.__eq__(other) # more foolproof def __copy__(self): return self.__class__.__new__(self.__class__, self.quantity, unit=self.unit) def __deepcopy__(self, memo): return self.__class__.__new__(self.__class__, - copy.deepcopy(self.quantity, memo), unit=copy.deepcopy(self.unit, memo) - ) + copy.deepcopy(self.quantity, memo), unit=copy.deepcopy(self.unit, memo) + ) def __hash__(self): return hash((self.__class__, self.quantity, self.unit)) + class _FXDateFrequency(DateFrequency, _StaticTimeDependenceBase): """Singleton placeholder/sentinel object for use in describing static data with no time dependence. """ + # define __new__, not __init__, because timedelta is immutable def __new__(cls): return super(_FXDateFrequency, cls).__new__(cls, 'static') @@ -1173,21 +1210,25 @@ def __copy__(self): def __deepcopy__(self, memo): return self.__class__.__new__(self.__class__) + FXDateFrequency = _FXDateFrequency() """Singleton placeholder/sentinel object for use in describing static data with no time dependence. """ + class AbstractDateRange(abc.ABC): """Defines interface (set of attributes) for :class:`DateRange` objects. """ pass + class AbstractDate(abc.ABC): """Defines interface (set of attributes) for :class:`Date` objects. """ pass + class AbstractDateFrequency(abc.ABC): """Defines interface (set of attributes) for :class:`DateFrequency` objects. """ diff --git a/src/util/exceptions.py b/src/util/exceptions.py index ada7f74f5..5db43438b 100644 --- a/src/util/exceptions.py +++ b/src/util/exceptions.py @@ -7,6 +7,7 @@ from subprocess import CalledProcessError import logging + _log = logging.getLogger(__name__) @@ -25,6 +26,7 @@ def exit_on_exception(exc, msg=None): print(msg) exit_handler(code=1) + def exit_handler(code=1, msg=None): """Wraps all calls to :py:func:`sys.exit`; could do additional cleanup not handled by atexit() here. @@ -33,6 +35,7 @@ def exit_handler(code=1, msg=None): print(msg) sys.exit(code) + def chain_exc(exc, new_msg, new_exc_class=None): """Raise a new exception from an existing one, in order to give more context for debugging. 
See Python documentation on @@ -56,6 +59,7 @@ def chain_exc(exc, new_msg, new_exc_class=None): except Exception as chained_exc: return chained_exc + def exc_descriptor(exc): # MDTFEvents are raised during normal program operation; use correct wording # for log messages so user doesn't think it's an error @@ -64,11 +68,13 @@ def exc_descriptor(exc): else: return "Caught exception" + class TimeoutAlarm(Exception): """Dummy exception raised if a subprocess times out.""" # NOTE py3 builds timeout into subprocess; fix this pass + class MDTFBaseException(Exception): """Base class to describe all MDTF-specific errors that can happen during the framework's operation.""" @@ -78,59 +84,69 @@ def __repr__(self): # instead just print message return f'{self.__class__.__name__}("{str(self)}")' + class ChildFailureEvent(MDTFBaseException): """Exception raised when a member of the object hierarchy is deactivated because all its child objects have failed. """ + def __init__(self, obj): self.obj = obj def __str__(self): return (f"Deactivating {self.obj.full_name} due to failure of all " - f"child objects.") + f"child objects.") + class PropagatedEvent(MDTFBaseException): """Exception passed between members of the object hierarchy when a parent object (:class:`~core.MDTFObjectBase`) has been deactivated and needs to deactivate its children. """ + def __init__(self, exc, parent): self.exc = exc self.parent = parent def __str__(self): return (f"{exc_descriptor(self.exc)} {repr(self.exc)} from deactivation " - f"of parent {self.parent.full_name}.") + f"of parent {self.parent.full_name}.") class MDTFFileNotFoundError(FileNotFoundError, MDTFBaseException): """Wrapper for :py:class:`FileNotFoundError` which handles error codes so we don't have to remember to import :py:mod:`errno` everywhere. """ + def __init__(self, path): super(MDTFFileNotFoundError, self).__init__( errno.ENOENT, os.strerror(errno.ENOENT), path ) + class MDTFFileExistsError(FileExistsError, MDTFBaseException): """Wrapper for :py:class:`FileExistsError` which handles error codes so we don't have to remember to import :py:mod:`errno` everywhere. """ + def __init__(self, path): super(MDTFFileExistsError, self).__init__( errno.EEXIST, os.strerror(errno.EEXIST), path ) + class MDTFCalledProcessError(CalledProcessError, MDTFBaseException): """Wrapper for :py:class:`subprocess.CalledProcessError`.""" pass + class WormKeyError(KeyError, MDTFBaseException): """Raised when attempting to overwrite or delete an entry in a :class:`~src.util.basic.WormDict`. """ pass + class DataclassParseError(ValueError, MDTFBaseException): """Raised when parsing input data fails on a :func:`~src.util.dataclass.mdtf_dataclass` or @@ -138,12 +154,14 @@ class DataclassParseError(ValueError, MDTFBaseException): """ pass + class RegexParseError(ValueError, MDTFBaseException): """Raised when parsing input data fails on a :func:`~src.util.dataclass.RegexPattern`. """ pass + class RegexSuppressedError(ValueError, MDTFBaseException): """Raised when parsing input data fails on a :func:`~src.util.dataclass.RegexPattern`, but we've decided to supress @@ -151,32 +169,38 @@ class RegexSuppressedError(ValueError, MDTFBaseException): """ pass + class UnitsError(ValueError, MDTFBaseException): """Raised when trying to convert between quantities with physically inequivalent units. 
""" pass + class ConventionError(MDTFBaseException): """Exception raised by a duplicate variable convention name.""" + def __init__(self, conv_name): self.conv_name = conv_name def __str__(self): return f"Error in the definition of convention '{self.conv_name}'." + class MixedDatePrecisionException(MDTFBaseException): """Exception raised when we attempt to operate on :class:`Date` or :class:`DateRange` objects with differing levels of precision, which shouldn't happen with data sampled at a single frequency. """ + def __init__(self, func_name='', msg=''): self.func_name = func_name self.msg = msg def __str__(self): return ("Attempted datelabel method '{}' on FXDate " - "placeholder: {}.").format(self.func_name, self.msg) + "placeholder: {}.").format(self.func_name, self.msg) + class FXDateException(MDTFBaseException): """Exception raised when :class:`FXDate` or :class:`FXDateRange` classes, @@ -184,13 +208,15 @@ class FXDateException(MDTFBaseException): time dependence, are accessed like real :class:`Date` or :class:`DateRange` objects. """ + def __init__(self, func_name='', msg=''): self.func_name = func_name self.msg = msg def __str__(self): return ("Attempted datelabel method '{}' on FXDate " - "placeholder: {}.").format(self.func_name, self.msg) + "placeholder: {}.").format(self.func_name, self.msg) + class DataRequestError(MDTFBaseException): """Dummy class used for fatal errors that take place during the @@ -198,12 +224,14 @@ class DataRequestError(MDTFBaseException): """ pass + class MDTFEvent(MDTFBaseException): """Dummy class to denote non-fatal errors, specifically "events" that are passed during the data query/fetch/preprocess stage of the framework. """ pass + class FatalErrorEvent(MDTFBaseException): """Dummy class used to "convert" :class:`MDTFEvent`\s to fatal errors (resulting in deactivation of a variable, pod or case.) via exception @@ -211,11 +239,13 @@ class FatalErrorEvent(MDTFBaseException): """ pass + class DataProcessingEvent(MDTFEvent): """Base class and common formatting code for events raised in data query/fetch. These should *not* be used for fatal errors (when a variable or POD is deactivated.) """ + def __init__(self, msg="", dataset=None): self.msg = msg self.dataset = dataset @@ -230,49 +260,64 @@ def __str__(self): # data_id = str(self.dataset) return self.msg + class DataQueryEvent(DataProcessingEvent): """Exception signaling a failure to find requested data in the remote location. """ pass + class DataExperimentEvent(DataProcessingEvent): """Exception signaling a failure to uniquely select an experiment for all variables based on query results. """ pass + class DataFetchEvent(DataProcessingEvent): """Exception signaling a failure to obtain data from the remote location. """ pass + class DataPreprocessEvent(DataProcessingEvent): """Exception signaling an error in preprocessing data after it's been fetched, but before any PODs run. """ pass + class MetadataEvent(DataProcessingEvent): """Exception signaling discrepancies in variable metadata. """ pass + class MetadataError(MDTFBaseException): """Exception signaling unrecoverable errors in variable metadata. """ pass + class UnitsUndefinedError(MetadataError): """Exception signaling unrecoverable errors in variable metadata. """ pass + class GenericDataSourceEvent(DataProcessingEvent): """Exception signaling a failure originating in the DataSource query/fetch pipeline whose cause doesn't fall into the above categories. 
""" pass + +class UnsupportedFileTypeError(MDTFBaseException): + """Exception for unsupported file types ingested by the framework + """ + pass + + class PodExceptionBase(MDTFBaseException): """Base class and common formatting code for exceptions affecting a single POD. @@ -297,6 +342,7 @@ def __str__(self): s += "." return s + class PodConfigError(PodExceptionBase): """Exception raised if we can't parse info in a POD's settings.jsonc file. (Covers issues with the file format/schema; malformed JSONC will raise a @@ -305,21 +351,25 @@ class PodConfigError(PodExceptionBase): """ _error_str = "Couldn't parse the settings.jsonc file" + class PodConfigEvent(MDTFEvent): """Exception raised during non-fatal events in resolving POD configuration. """ pass + class PodDataError(PodExceptionBase): """Exception raised if POD doesn't have required data to run. """ _error_str = "Requested data not available" + class PodRuntimeError(PodExceptionBase): """Exception raised if POD doesn't have required resources to run. """ _error_str = "Error in setting the runtime environment" + class PodExecutionError(PodExceptionBase): """Exception raised if POD exits with non-zero retcode or otherwise raises an error during execution. diff --git a/src/util/filesystem.py b/src/util/filesystem.py index 7233a5982..88529e739 100644 --- a/src/util/filesystem.py +++ b/src/util/filesystem.py @@ -3,20 +3,21 @@ """ import os import io -import collections from distutils.spawn import find_executable import glob -import json import re import shutil +import signal import string +import tempfile from . import basic from . import exceptions -# TODO from envyaml import EnvYAML +from . import signal_logger import logging _log = logging.getLogger(__name__) + def abbreviate_path(path, old_base, new_base=None): """Express *path* as a path relative to *old_base*, optionally prepending *new_base*. @@ -27,19 +28,21 @@ def abbreviate_path(path, old_base, new_base=None): str_ = os.path.join(new_base, str_) return str_ -def resolve_path(path, root_path="", env=None, log=_log): + +def resolve_path(rel_path: str, root_path: str = "", env_vars: dict = None, log=_log): """Abbreviation to resolve relative paths, expanding environment variables if necessary. Args: - path (str): Path to resolve. + rel_path (str): Path to resolve. root_path (str): Optional. Root path to resolve `path` with. If not given, resolves relative to :py:func:`os.getcwd`. + env_vars (dict): global environment variables Returns: str: Absolute version of *path*. """ - def _expandvars(path, env_dict): + def _expandvars(path_name: str, env_dict: dict): """Expand quoted variables of the form ``$key`` and ``${key}`` in *path*, where ``key`` is a key in *env_dict*, similar to :py:func:`os.path.expandvars`. 
@@ -50,27 +53,28 @@ def _expandvars(path, env_dict): return re.sub( r'\$(\w+|\{([^}]*)\})', lambda m: env_dict.get(m.group(2) or m.group(1), m.group(0)), - path + path_name ) - if path == '': - return path # default value set elsewhere - path = os.path.expanduser(path) # resolve '~' to home dir - path = os.path.expandvars(path) # expand $VAR or ${VAR} for shell env_vars - if isinstance(env, dict): - path = _expandvars(path, env) - if '$' in path: - log.warning("Couldn't resolve all env vars in '%s'", path) - return path - if os.path.isabs(path): - return path + if rel_path == "": + return rel_path # default value set elsewhere + rel_path = os.path.expanduser(rel_path) # resolve '~' to home dir + rel_path = os.path.expandvars(rel_path) # expand $VAR or ${VAR} for shell env_vars + if isinstance(env_vars, dict): + rel_path = _expandvars(rel_path, env_vars) + if '$' in rel_path: + log.warning("Couldn't resolve all env vars in '%s'", rel_path) + return rel_path + if os.path.isabs(rel_path): + return rel_path if root_path == "": root_path = os.getcwd() assert os.path.isabs(root_path) - return os.path.normpath(os.path.join(root_path, path)) + return os.path.normpath(os.path.join(root_path, rel_path)) + def recursive_copy(src_files, src_root, dest_root, copy_function=None, - overwrite=False): + overwrite=False): """Copy *src_files* to *dest_root*, preserving relative subdirectory structure. Copies a subset of files in a directory subtree rooted at *src_root* to an @@ -113,13 +117,15 @@ def recursive_copy(src_files, src_root, dest_root, copy_function=None, for src, dest in zip(src_files, dest_files): copy_function(src, dest) + def check_executable(exec_name): """Tests if the executable *exec_name* is found on the current ``$PATH``. Args: exec_name (:py:obj:`str`): Name of the executable to search for. """ - return (find_executable(exec_name) is not None) + return find_executable(exec_name) is not None + def find_files(src_dirs, filename_globs, n_files=None): """Return list of files in *src_dirs*, or any subdirectories, matching any @@ -154,42 +160,42 @@ def find_files(src_dirs, filename_globs, n_files=None): return list(files) -def check_dir(dir_, attr_name="", create=False): +def check_dir(dir_path, attr_name="", create=False): """Check existence of directories. No action is taken for directories that already exist; nonexistent directories either raise a :class:`~util.MDTFFileNotFoundError` or cause the creation of that directory. Args: - dir\_: If a string, the absolute path to check; otherwise, assume the + dir_path: If a string, the absolute path to check; otherwise, assume the path to check is given by the *attr_name* attribute on this object. attr_name: Name of the attribute being checked (used in log messages). create: (bool, default False): if True, nonexistent directories are created. 
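# Illustrative call of resolve_path() as defined above; the paths and the
# env_vars dict are hypothetical. Expansion order: '~', then shell env vars,
# then the env_vars dict, then (if still relative) a join against root_path.
from src.util.filesystem import resolve_path

abs_path = resolve_path(
    "${MODEL_ROOT}/cmip6/day", root_path="/home/user",
    env_vars={"MODEL_ROOT": "inputdata"}
)
# -> "/home/user/inputdata/cmip6/day" when MODEL_ROOT isn't set in the shell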
""" - if not isinstance(dir_, str): - dir_ = getattr(dir_, attr_name, None) - if not isinstance(dir_, str): - raise ValueError(f"Expected string, received {repr(dir_)}.") + if not isinstance(dir_path, str): + dir_path = getattr(dir_path, attr_name, None) + if not isinstance(dir_path, str): + raise ValueError(f"Expected string, received {repr(dir_path)}.") try: - if not os.path.isdir(dir_): + if not os.path.isdir(dir_path): if create: - os.makedirs(dir_, exist_ok=False) + os.makedirs(dir_path, exist_ok=False) else: - raise exceptions.MDTFFileNotFoundError(dir_) + raise exceptions.MDTFFileNotFoundError(dir_path) except Exception as exc: if isinstance(exc, FileNotFoundError): path = getattr(exc, 'filename', '') if attr_name: - if not os.path.exists(dir_): + if not os.path.exists(dir_path): raise exceptions.MDTFFileNotFoundError( f"{attr_name} not found at '{path}'.") else: raise exceptions.MDTFFileNotFoundError( - f"{attr_name}: Path '{dir_}' exists but is not a directory.") + f"{attr_name}: Path '{dir_path}' exists but is not a directory.") else: raise exceptions.MDTFFileNotFoundError(path) else: - raise OSError(f"Caught exception when checking {attr_name}={dir_}: {repr(exc)}") \ + raise OSError(f"Caught exception when checking {attr_name}={dir_path}: {repr(exc)}") \ from exc @@ -218,9 +224,9 @@ def _split_version(file_): $ # end of string """, file_, re.VERBOSE) if match: - return (match.group('file_base'), match.group('version')) + return match.group('file_base'), match.group('version') else: - return (file_, '') + return file_, '' def _reassemble(dir_, file_, version, ext_, final_sep): if version: @@ -265,163 +271,14 @@ def _path_exists(dir_list, file_, new_v, ext_, sep): while _path_exists(dir_list, file_, new_v, ext_, final_sep): new_v = new_v + 1 new_path = _reassemble(dir_, file_, new_v, ext_, final_sep) - return (new_path, new_v) - -# --------------------------------------------------------- -# CONFIG FILE PARSING -# --------------------------------------------------------- - -def strip_comments(str_, delimiter=None): - """Remove comments from *str\_*. Comments are taken to start with an - arbitrary *delimiter* and run to the end of the line. - """ - # would be better to use shlex, but that doesn't support multi-character - # comment delimiters like '//' - ESCAPED_QUOTE_PLACEHOLDER = '\v' # no one uses vertical tab - - if not delimiter: - return str_ - lines = str_.splitlines() - for i in range(len(lines)): - # get rid of lines starting with delimiter - if lines[i].startswith(delimiter): - lines[i] = '' - continue - # handle delimiters midway through a line: - # If delimiter appears quoted in a string, don't want to treat it as - # a comment. So for each occurrence of delimiter, count number of - # "s to its left and only truncate when that's an even number. - # First we get rid of \-escaped single "s. 
- replaced_line = lines[i].replace('\\\"', ESCAPED_QUOTE_PLACEHOLDER) - line_parts = replaced_line.split(delimiter) - quote_counts = [s.count('"') for s in line_parts] - j = 1 - while sum(quote_counts[:j]) % 2 != 0: - if j >= len(quote_counts): - raise ValueError(f"Couldn't parse line {i+1} of string.") - j += 1 - replaced_line = delimiter.join(line_parts[:j]) - lines[i] = replaced_line.replace(ESCAPED_QUOTE_PLACEHOLDER, '\\\"') - # make lookup table of correct line numbers, taking into account lines we - # dropped - line_nos = [i for i, s in enumerate(lines) if (s and not s.isspace())] - # join lines, stripping blank lines - new_str = '\n'.join([s for s in lines if (s and not s.isspace())]) - return (new_str, line_nos) - - -def parse_json(str_): - """Parse JSONC (JSON with ``//``-comments) string *str\_* into a Python object. - Comments are discarded. Wraps standard library :py:func:`json.loads`. - - Syntax errors in the input (:py:class:`~json.JSONDecodeError`) are passed - through from the Python standard library parser. We correct the line numbers - mentioned in the errors to refer to the original file (i.e., with comments.) - """ - def _pos_from_lc(lineno, colno, str_): - # fix line number, since we stripped commented-out lines. JSONDecodeError - # computes line/col no. in error message from character position in string. - lines = str_.splitlines() - return (colno - 1) + sum( (len(line) + 1) for line in lines[:lineno]) + return new_path, new_v - (strip_str, line_nos) = strip_comments(str_, delimiter= '//') - try: - parsed_json = json.loads(strip_str, - object_pairs_hook=collections.OrderedDict) - except json.JSONDecodeError as exc: - # fix reported line number, since we stripped commented-out lines. - assert exc.lineno <= len(line_nos) - raise json.JSONDecodeError( - msg=exc.msg, doc=str_, - pos=_pos_from_lc(line_nos[exc.lineno-1], exc.colno, str_) - ) - except UnicodeDecodeError as exc: - raise json.JSONDecodeError( - msg=f"parse_json received UnicodeDecodeError:\n{exc}", - doc=strip_str, pos=0 - ) - return parsed_json - - -def read_json(file_path, log=_log): - """Reads a struct from a JSONC file at *file_path*. - - Raises: - :class:`~src.util.exceptions.MDTFFileNotFoundError`: If file not found at - *file_path*. - - Returns: - dict: data contained in the file, as parsed by :func:`parse_json`. - - Execution exits with error code 1 on all other exceptions. - """ - log.debug('Reading file %s', file_path) - if not os.path.isfile(file_path): - raise exceptions.MDTFFileNotFoundError(file_path) - try: - with io.open(file_path, 'r', encoding='utf-8') as file_: - str_ = file_.read() - except Exception as exc: - # something more serious than missing file - _log.critical("Caught exception when trying to read %s: %r", file_path, exc) - exit(1) - return parse_json(str_) - -def find_json(dir_, file_name, exit_if_missing=True, log=_log): - """Reads a JSONC file *file_name* anywhere within the root directory *dir\_*. - - Args: - dir\_ (str): Root directory to search (using :func:`find_files`). - file_name (str): Filename to search for. - exit_if_missing (bool): Optional, default True. Exit with error code 1 - if *file_name* not found. 
- """ - try: - f = find_files(dir_, file_name, n_files=1) - return read_json(f[0]) - except exceptions.MDTFFileNotFoundError: - if exit_if_missing: - _log.critical("Couldn't find file %s in %s.", file_name, dir_) - exit(1) - else: - log.debug("Couldn't find file %s in %s; continuing.", - file_name, dir_) - return dict() - -def write_json(struct, file_path, sort_keys=False, log=_log): - """Serializes *struct* to a JSON file at *file_path*. - - Args: - struct (dict): Object to serialize. - file_path (str): path of the JSON file to write. - """ - log.debug('Writing file %s', file_path) - try: - str_ = json.dumps(struct, - sort_keys=sort_keys, indent=2, separators=(',', ': ')) - with io.open(file_path, 'w', encoding='utf-8') as file_: - file_.write(str_) - except IOError: - _log.critical(f'Fatal IOError when trying to write {file_path}. Exiting.') - exit(1) - - -def pretty_print_json(struct, sort_keys=False): - """Serialize *struct* to a pseudo-YAML string for human-readable debugging - purposes only. Output is not valid JSON (or YAML). - """ - str_ = json.dumps(struct, sort_keys=sort_keys, indent=2) - for char in [',', '{', '}', '[', ']']: - str_ = str_.replace(char, '') - # remove isolated double quotes, but keep "" - str_ = re.sub(r'(?`_ and ``__. """ - flags = re.VERBOSE # matching is case-sensitive, unlike default - delimiter = '{{' # starting delimter is two braces, then apply + flags = re.VERBOSE # matching is case-sensitive, unlike default + delimiter = '{{' # starting delimter is two braces, then apply pattern = r""" \{\{(?: # match delimiter itself, but don't include it # Alternatives for what to do with string following delimiter: @@ -449,8 +306,9 @@ class _DoubleBraceTemplate(string.Template): ) """ + def append_html_template(template_file, target_file, template_dict={}, - create=True, append=True): + create=True, append=True): """Perform substitutions on *template_file* and write result to *target_file*. Variable substitutions are done with custom @@ -479,7 +337,7 @@ def append_html_template(template_file, target_file, template_dict={}, If False, overwrite *target_file* with the substituted contents of *template_file*. 
""" - assert os.path.exists(template_file) + assert os.path.exists(template_file), f"Template file {template_file} not found" with io.open(template_file, 'r', encoding='utf-8') as f: html_str = f.read() html_str = _DoubleBraceTemplate(html_str).safe_substitute(template_dict) @@ -499,3 +357,61 @@ def append_html_template(template_file, target_file, template_dict={}, mode = 'w' with io.open(target_file, mode, encoding='utf-8') as f: f.write(html_str) + + +class TempDirManager: + _prefix = 'MDTF_temp_' + keep_temp: bool = False + temp_root: str = "" + _dirs: list + _root: str = "" + _unittest: bool = False + + def __init__(self, config): + if hasattr(config, 'unit_test'): + self._unittest = config.unit_test + if not hasattr(config, 'TEMP_DIR_ROOT'): + temp_root = tempfile.gettempdir() + else: + temp_root = config.TEMP_DIR_ROOT + if not self._unittest: + assert os.path.isdir(temp_root), "Could not find temp_root directory" + self._root = temp_root + self._dirs = [] + self.keep_temp = config.get('keep_temp', False) + + # delete temp files if we're killed + signal.signal(signal.SIGTERM, self.tempdir_cleanup_handler) + signal.signal(signal.SIGINT, self.tempdir_cleanup_handler) + + def make_tempdir(self, hash_obj=None): + if hash_obj is None: + new_dir = tempfile.mkdtemp(prefix=self._prefix, dir=self._root) + elif isinstance(hash_obj, str): + new_dir = os.path.join(self._root, self._prefix+hash_obj) + else: + # nicer-looking hash representation + hash_ = hex(hash(hash_obj))[2:] + assert isinstance(hash_, str) + new_dir = os.path.join(self._root, self._prefix+hash_) + if not os.path.isdir(new_dir): + os.makedirs(new_dir) + assert new_dir not in self._dirs + self._dirs.append(new_dir) + return new_dir + + def rm_tempdir(self, path): + assert path in self._dirs + self._dirs.remove(path) + _log.debug("Cleaning up temp dir %s", path) + shutil.rmtree(path) + + def cleanup(self): + if not self.keep_temp and any(self._dirs): + for d in self._dirs: + self.rm_tempdir(d) + + def tempdir_cleanup_handler(self, frame=None, signum=None): + # delete temp files + signal_logger(self.__class__.__name__, signum, frame, log=_log) + self.cleanup() diff --git a/src/util/json_utils.py b/src/util/json_utils.py new file mode 100644 index 000000000..6f272b4a5 --- /dev/null +++ b/src/util/json_utils.py @@ -0,0 +1,176 @@ +"""Utility functions for reading and manipulating json files +""" +import os +import io +import collections +import json +import re +from . import exceptions + +import logging + +_log = logging.getLogger(__name__) + + +def get_config_file_type(file_path: str)->str: + """Verify that configuration file is json or yaml""" + ext = os.path.splitext(file_path)[-1].lower() + + supported_file_types = [".jsonc", ".json", ".yml"] + if ext not in supported_file_types: + raise exceptions.UnsupportedFileTypeError( + f"Unsupported file type. {file_path} must be of type .json(c) or .yml") + return ext + + +def strip_comments(str_, delimiter=None): + """Remove comments from *str\_*. Comments are taken to start with an + arbitrary *delimiter* and run to the end of the line. 
+ """ + # would be better to use shlex, but that doesn't support multi-character + # comment delimiters like '//' + ESCAPED_QUOTE_PLACEHOLDER = '\v' # no one uses vertical tab + + if not delimiter: + return str_ + lines = str_.splitlines() + for i in range(len(lines)): + # get rid of lines starting with delimiter + if lines[i].startswith(delimiter): + lines[i] = '' + continue + # handle delimiters midway through a line: + # If delimiter appears quoted in a string, don't want to treat it as + # a comment. So for each occurrence of delimiter, count number of + # "s to its left and only truncate when that's an even number. + # First we get rid of \-escaped single "s. + replaced_line = lines[i].replace('\\\"', ESCAPED_QUOTE_PLACEHOLDER) + line_parts = replaced_line.split(delimiter) + quote_counts = [s.count('"') for s in line_parts] + j = 1 + while sum(quote_counts[:j]) % 2 != 0: + if j >= len(quote_counts): + raise ValueError(f"Couldn't parse line {i+1} of string.") + j += 1 + replaced_line = delimiter.join(line_parts[:j]) + lines[i] = replaced_line.replace(ESCAPED_QUOTE_PLACEHOLDER, '\\\"') + # make lookup table of correct line numbers, taking into account lines we + # dropped + line_nos = [i for i, s in enumerate(lines) if (s and not s.isspace())] + # join lines, stripping blank lines + new_str = '\n'.join([s for s in lines if (s and not s.isspace())]) + return new_str, line_nos + + +def parse_json(str_): + """Parse JSONC (JSON with ``//``-comments) string *str\_* into a Python object. + Comments are discarded. Wraps standard library :py:func:`json.loads`. + + Syntax errors in the input (:py:class:`~json.JSONDecodeError`) are passed + through from the Python standard library parser. We correct the line numbers + mentioned in the errors to refer to the original file (i.e., with comments.) + """ + def _pos_from_lc(lineno, colno, str_): + # fix line number, since we stripped commented-out lines. JSONDecodeError + # computes line/col no. in error message from character position in string. + lines = str_.splitlines() + return (colno - 1) + sum((len(line) + 1) for line in lines[:lineno]) + + (strip_str, line_nos) = strip_comments(str_, delimiter='//') + try: + parsed_json = json.loads(strip_str, + object_pairs_hook=collections.OrderedDict) + except json.JSONDecodeError as exc: + # fix reported line number, since we stripped commented-out lines. + assert exc.lineno <= len(line_nos) + raise json.JSONDecodeError( + msg=exc.msg, doc=str_, + pos=_pos_from_lc(line_nos[exc.lineno-1], exc.colno, str_) + ) + except UnicodeDecodeError as exc: + raise json.JSONDecodeError( + msg=f"parse_json received UnicodeDecodeError:\n{exc}", + doc=strip_str, pos=0 + ) + + return parsed_json + + +def read_json(file_path, log=_log): + """Reads a struct from a JSONC file at *file_path*. + + Raises: + :class:`~src.util.exceptions.MDTFFileNotFoundError`: If file not found at + *file_path*. + + Returns: + dict: data contained in the file, as parsed by :func:`parse_json`. + + Execution exits with error code 1 on all other exceptions. + """ + log.debug('Reading file %s', file_path) + try: + with io.open(file_path, 'r', encoding='utf-8') as file_: + str_ = file_.read() + except Exception as exc: + # something more serious than missing file + _log.critical("Caught exception when trying to read %s: %r", file_path, exc) + exit(1) + return parse_json(str_) + + +def find_json(file_path, exit_if_missing=True, log=_log): + """Reads a JSONC file + + Args: + file_path (str): Filename to search for. 
+        exit_if_missing (bool): Optional, default True. Exit with error code 1
+            if *file_path* not found.
+        log: logger to write status messages to
+    """
+    # os.path.isfile() returns a bool and never raises MDTFFileNotFoundError,
+    # so test its value directly rather than wrapping it in try/except
+    if not os.path.isfile(file_path):
+        if exit_if_missing:
+            _log.critical("Couldn't find file %s.", file_path)
+            exit(1)
+        else:
+            log.debug("Couldn't find file %s; continuing.",
+                      file_path)
+            return dict()
+    return read_json(file_path)
+
+
+def write_json(struct, file_path, sort_keys=False, log=_log):
+    """Serializes *struct* to a JSON file at *file_path*.
+
+    Args:
+        struct (dict): Object to serialize.
+        file_path (str): path of the JSON file to write.
+        sort_keys (bool): Optional. If True, sort output keys (passed to
+            :py:func:`json.dumps`).
+        log: logger to write status messages to
+    """
+    log.debug('Writing file %s', file_path)
+    try:
+        str_ = json.dumps(struct,
+                          sort_keys=sort_keys,
+                          indent=2,
+                          separators=(',', ': '))
+        with io.open(file_path, 'w', encoding='utf-8') as file_:
+            file_.write(str_)
+    except IOError:
+        _log.critical(f'Fatal IOError when trying to write {file_path}. Exiting.')
+        exit(1)
+
+
+def pretty_print_json(struct, sort_keys=False):
+    """Serialize *struct* to a pseudo-YAML string for human-readable debugging
+    purposes only. Output is not valid JSON (or YAML).
+    """
+    str_ = json.dumps(struct, sort_keys=sort_keys, indent=2)
+    for char in [',', '{', '}', '[', ']']:
+        str_ = str_.replace(char, '')
+    # remove isolated double quotes, but keep ""
+    str_ = re.sub(r'(?<!")"(?!")', "", str_)
+    return str_
diff --git a/src/util/logs.py b/src/util/logs.py
--- a/src/util/logs.py
+++ b/src/util/logs.py
@@ ... @@ class GeqLevelFilter(_LevelFilterBase):
     def filter(self, record):
         return record.levelno >= self.levelno
 
+
 class LtLevelFilter(_LevelFilterBase):
     """:py:class:`~logging.Filter` to include only log messages with a
     severity less than *level*.
@@ -242,6 +256,7 @@ class LtLevelFilter(_LevelFilterBase):
     def filter(self, record):
         return record.levelno < self.levelno
 
+
 class EqLevelFilter(_LevelFilterBase):
     """:py:class:`~logging.Filter` to include only log messages with a
     severity equal to *level*.
@@ -249,6 +264,7 @@ class EqLevelFilter(_LevelFilterBase):
     def filter(self, record):
         return record.levelno == self.levelno
 
+
 class NameMatchFilter(logging.Filter):
     """:py:class:`~logging.Filter` that only accepts log events directed to it
     specifically, rejecting all events coming from child loggers.
@@ -263,8 +279,10 @@ def __init__(self, name=""):
     def filter(self, record):
         return (record.name == self._name)
 
+
 OBJ_LOG_TAG_ATTR_NAME = 'tags'
 
+
 class TagMatchFilter(logging.Filter):
     """:py:class:`~logging.Filter` which only accepts records having the
     designated combination of custom 'tag' attributes. These are assigned by the
@@ -285,6 +303,7 @@ def filter(self, record):
 
 # ------------------------------------------------------------------------------
 
+
 # standardize
 OBJ_LOG_ROOT = 'MDTF' # "root logger" of the object logger hierarchy
 ObjectLogTag = basic.MDTFEnum(
@@ -298,6 +317,7 @@ def filter(self, record):
     the logging events.
     """
 
+
 class MDTFObjectLogger(logging.Logger):
     """This class wraps functionality for use by :class:`MDTFObjectLoggerMixin`
     for log record-keeping by objects in the object hierarchy:
@@ -312,6 +332,7 @@ def __init__(self, name):
         super(MDTFObjectLogger, self).__init__(name)
         self._exceptions = []
         self._tracebacks = []
+        self._log_buffer = io.StringIO()
 
     def log(self, level, msg, *args, **kw):
         # add "tags" attribute to all emitted LogRecords
@@ -360,7 +381,7 @@ def has_exceptions(self):
         """Return boolean corresponding to whether this object has received any
         exceptions (via :meth:`store_exception`.)
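# Sketch of how the level-filter classes above are typically attached,
# assuming GeqLevelFilter is present in logs.py and that _LevelFilterBase
# accepts a `level` argument: records below WARNING go to stdout, WARNING
# and above to stderr.
import logging
import sys

logger = logging.getLogger("demo")
out_handler = logging.StreamHandler(sys.stdout)
out_handler.addFilter(LtLevelFilter(level=logging.WARNING))
err_handler = logging.StreamHandler(sys.stderr)
err_handler.addFilter(GeqLevelFilter(level=logging.WARNING))
logger.addHandler(out_handler)
logger.addHandler(err_handler)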
""" - return (len(self._exceptions) > 0) + return len(self._exceptions) > 0 def store_exception(self, exc): """Add an Exception object *exc* to the internal list. @@ -370,7 +391,7 @@ def store_exception(self, exc): self._tracebacks.append(tb_exc) @classmethod - def get_logger(cls, log_name): + def get_logger(cls, log_name) -> logging.getLogger(): """Workaround for setting the logger class, since logger objects have global state (calling getLogger with the same name returns the same object, like a Singleton.) @@ -384,12 +405,18 @@ def get_logger(cls, log_name): log.setLevel(logging.NOTSET) return log -class MDTFObjectLoggerMixinBase(): + def close_log_file(self, log=True): + self.log._log_handler.close() + self.log._log_handler = None + + +class MDTFObjectLoggerMixinBase: """Dummy base class acting as a parent for all logging mixin classes for elements of the object hierarchy. """ pass + class MDTFObjectLoggerMixin(MDTFObjectLoggerMixinBase): """Base class to implement per-object logging for objects in the object hierarchy. Based on ``__. @@ -403,19 +430,24 @@ class MDTFObjectLoggerMixin(MDTFObjectLoggerMixinBase): This is intended for preparing per-POD and per-case log files; logging intended for the console should use the module loggers. """ - def init_log(self, fmt=None): + def init_log(self, fmt=None, log_dir=None): """Logger initialization. This is a mixin class, so we don't define a ``__init__`` method for simplicity. """ if fmt is None: fmt = '%(levelname)s: %(message)s' - - assert hasattr(self, 'log') - self._log_handler = StringIOHandler() - # don't record events from children in StringIO buffer - self._log_handler.addFilter(NameMatchFilter(self._log_name)) - formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S') - self._log_handler.setFormatter(formatter) + assert hasattr(self, 'log'), 'class is missing required `log` attribute' + if log_dir is not None: + self._log_handler = MDTFHeaderFileHandler( + filename=os.path.join(log_dir, f"{self.name}.log"), + mode="w", encoding="utf-8" + ) + else: + self._log_handler = StringIOHandler() + # don't record events from children in StringIO buffer + self._log_handler.addFilter(NameMatchFilter(self._log_name)) + formatter = logging.Formatter(fmt=fmt, datefmt='%H:%M:%S') + self._log_handler.setFormatter(formatter) self.log.addHandler(self._log_handler) self.init_extra_log_handlers() @@ -445,11 +477,11 @@ def format_log(self, children=True): # list exceptions before anything else: if self.log.has_exceptions: exc_strs = [''.join(exc.format()) for exc in self.log._tracebacks] - exc_strs = [f"*** caught exception (#{i+1}):\n{exc}" \ - for i, exc in enumerate(exc_strs)] + exc_strs = [f"*** caught exception (#{i+1}):\n{exc}" + for i, exc in enumerate(exc_strs)] str_ += "".join(exc_strs) + '\n' # then log contents: - str_ += self._log_handler.buffer_contents().rstrip() + #str_ += self._log_handler.buffer_contents().rstrip() # then contents of children: if children: str_ += '\n' @@ -461,6 +493,7 @@ def format_log(self, children=True): str_ += f"<{child} log placeholder>\n" return _hanging_indent(str_, 0, 4) + '\n' + class VarlistEntryLoggerMixin(MDTFObjectLoggerMixin): """Mixin providing per-object logging for :class:`~diagnostic.VarlistEntry`. 
""" @@ -475,7 +508,8 @@ def init_extra_log_handlers(self): self._nc_history_log.setFormatter(formatter) self.log.addHandler(self._nc_history_log) -class _CaseAndPODHandlerMixin(): + +class _CaseAndPODHandlerMixin: """Common methods for providing per-object logging for :class:`PODLoggerMixin` and :class:`CaseLoggerMixin`. """ @@ -500,13 +534,14 @@ def init_extra_log_handlers(self): self._out_file_log.setFormatter(formatter) self.log.addHandler(self._out_file_log) + class PODLoggerMixin(_CaseAndPODHandlerMixin, MDTFObjectLoggerMixin): """Mixin providing per-object logging for :class:`~diagnostic.Diagnostic` (POD objects.) """ - pass -class CaseLoggerMixin(_CaseAndPODHandlerMixin, MDTFObjectLoggerMixinBase): + +class CaseLoggerMixin(_CaseAndPODHandlerMixin, MDTFObjectLoggerMixin): """Mixin providing per-object logging for :class:`~data_manager.DataSourceBase` (case objects, corresponding to experiments.) """ @@ -515,22 +550,33 @@ def init_log(self, log_dir, fmt=None): # NB: no super(); redefining the method if fmt is None: fmt = ("%(asctime)s %(levelname)s: %(funcName)s (%(filename)s line " - "%(lineno)d):\n%(message)s") + "%(lineno)d):\n%(message)s") assert hasattr(self, 'log') self.log.propagate = True self.log.setLevel(logging.DEBUG) - if self.log.hasHandlers(): - for handler in self.log.handlers: - self.log.removeHandler(handler) - self._log_handler = MDTFHeaderFileHandler( - filename=os.path.join(log_dir, f"{self.name}.log"), - mode="w", encoding="utf-8" - ) formatter = HangingIndentFormatter( fmt=fmt, datefmt='%H:%M:%S', header="", footer="\n" ) + if log_dir is not None: + self._log_handler = MDTFHeaderFileHandler( + filename=os.path.join(log_dir, f"{self.name}.log"), + mode="w", encoding="utf-8" + ) + else: + self._log_handler = StringIOHandler() + # don't record events from children in StringIO buffer + self._log_handler.addFilter(NameMatchFilter(self._log_name)) + self._log_handler.setFormatter(formatter) + if self.log.hasHandlers(): + for handler in self.log.handlers: + self.log.removeHandler(handler) + self._log_handler = MDTFHeaderFileHandler( + filename=os.path.join(log_dir, f"{self.name}.log"), + mode="w", encoding="utf-8" + ) + self._log_handler.setFormatter(formatter) self.log.addHandler(self._log_handler) @@ -545,6 +591,7 @@ def close_log_file(self, log=True): # ------------------------------------------------------------------------------ + def git_info(): """Get the current git branch, hash, and list of uncommitted files, if available. Based on NumPy's implementation: ``__. @@ -583,6 +630,7 @@ def _minimal_ext_cmd(cmd): git_hash = "" return (git_branch, git_hash, git_dirty) + def mdtf_log_header(title): """Returns string of system debug information to use as log file header. Calls :func:`git_info` to get repo status. @@ -605,19 +653,21 @@ def mdtf_log_header(title): str_ = f"ERROR: {err_str}\n" return str_ + (80 * '-') + '\n\n' -def signal_logger(caller_name, signum=None, frame=None, log=_log): + +def signal_logger(caller_name: str, signum=None, frame=None, log=_log): """Lookup signal name from number and write to log. Taken from ``__. Args: caller_name (str): Calling function name, only used in log message. - signum: Signal number of the signal we recieved. - frame: Parameters of the signal we recieved. + signum: Signal number of the signal we received. 
+ frame: parameters of received signal + log: log file """ if signum: sig_lookup = { - k:v for v, k in reversed(sorted(list(signal.__dict__.items()))) \ - if v.startswith('SIG') and not v.startswith('SIG_') + k: v for v, k in reversed(sorted(list(signal.__dict__.items()))) + if v.startswith('SIG') and not v.startswith('SIG_') } log.info( "%s caught signal %s (%s)", @@ -626,6 +676,7 @@ def signal_logger(caller_name, signum=None, frame=None, log=_log): else: log.info("%s caught unknown signal.", caller_name) + def _set_excepthook(root_logger): """Ensure all uncaught exceptions, other than user KeyboardInterrupt, are logged to the root logger. @@ -644,6 +695,7 @@ def uncaught_exception_handler(exc_type, exc_value, exc_traceback): sys.excepthook = uncaught_exception_handler + def _configure_logging_dict(log_d, log_args): """Convert CLI flags (``--verbose``/``--quiet``) into log levels. Configure log level and filters on console handlers in a logging config dictionary. @@ -679,6 +731,7 @@ def _configure_logging_dict(log_d, log_args): log_d['root']['handlers'] = ["stderr"] return log_d + def initial_log_config(): """Configure the root logger for logging to console and to a cache provided by :class:`MultiFlushMemoryHandler`. @@ -748,6 +801,7 @@ def initial_log_config(): logging.config.dictConfig(log_d) root_logger.debug('Console loggers configured.') + def transfer_log_cache(target_log=None, close=False): """Transfer the contents of the root log cache (:class:`MultiFlushMemoryHandler`) to logs on newly-configured objects. @@ -766,3 +820,118 @@ def transfer_log_cache(target_log=None, close=False): # delete it temp_log_cache.close() root_logger.removeHandler(temp_log_cache) + + +@dataclass.mdtf_dataclass +class MDTFObjectBase(metaclass=basic.MDTFABCMeta): + """Base class providing shared functionality for the case object hierarchy, which is: + + - :class:`~data_manager.DataSourceBase`\s belonging to a run of the package; + - :class:`~pod_setup.PODObject`\s POD belonging to a + :class:`~data_manager.DataSourceBase`; + - :class:`~diagnostic.VarlistEntry`\s (requested model variables) belonging + to a :class:`~diagnostic.Diagnostic`. 
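# The reverse lookup signal_logger() builds above can also be read directly
# off the stdlib enum; a minimal equivalent (numeric values shown are POSIX):
import signal

print(signal.Signals(2).name)   # 'SIGINT'
print(signal.Signals(15).name)  # 'SIGTERM'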
+ """ + _id: basic.MDTF_ID = None + name: str = dataclass.MANDATORY + _parent: typing.Any = dc.field(default=dataclass.MANDATORY, compare=False) + status: basic.ObjectStatus = dc.field(default=basic.ObjectStatus.NOTSET, compare=False) + + def __post_init__(self): + if self._id is None: + # assign unique ID # so that we don't need to rely on names being unique + self._id = basic.MDTF_ID() + # init object-level logger + self.log = MDTFObjectLogger.get_logger(self._log_name) + + # the @property decorator allows us to attach code to designated attribute, such as getter and setter methods + @property + def _log_name(self): + if self._parent is None: + return OBJ_LOG_ROOT # framework: root of tree + else: + _log_name = f"{self.name}_{self._id}".replace('.', '_') + return f"{self._parent._log_name}.{_log_name}" + + @property + def full_name(self): + return f"<#{self._id}:{self._parent.name}.{self.name}>" + + def __hash__(self): + return self._id.__hash__() + + @property + def failed(self): + return self.status == basic.ObjectStatus.FAILED # abbreviate + + @property + def active(self): + return self.status == basic.ObjectStatus.ACTIVE # abbreviate + + @property + @abc.abstractmethod + def _children(self): + """Iterable of child objects associated with this object.""" + pass + + # This is a figurative "birth" routine that generates an object full of child objects + def iter_children(self, child_type=None, status=None, status_neq=None): + """Generator iterating over child objects associated with this object. + + Args: + child_type: None or Type `type`; default None. If None, iterates over + all child objects regardless of their type + status: None or :class:`ObjectStatus`, default None. If None, + iterates over all child objects, regardless of status. If a + :class:`ObjectStatus` value is passed, only iterates over + child objects with that status. + status_neq: None or :class:`ObjectStatus`, default None. If set, + iterates over child objects which *don't* have the given status. + If *status* is set, this setting is ignored. 
+ """ + iter_ = self._children + if child_type is not None: # return the iter_ elements that match a specified child_type + iter_ = filter((lambda x: isinstance(x, child_type)), iter_) + if status is not None: # return the iter_ elements that match the specified status + iter_ = filter((lambda x: x.status == status), iter_) + elif status_neq is not None: # return the iter elements that do NOT match status_neq + iter_ = filter((lambda x: x.status != status_neq), iter_) + yield from iter_ + + def child_deactivation_handler(self, child, exc): + # needs to test for child_type + pass + + def child_status_update(self, exc=None): + if next(self.iter_children(), None) is None: + # should never get here (no children of any status), because this + # method should only be called by children + raise ValueError(f"Children misconfigured for {self.full_name}.") + + # if all children have failed, deactivate self + if not self.failed and \ + next(self.iter_children(status_neq=basic.ObjectStatus.FAILED), None) is None: + self.deactivate(exceptions.ChildFailureEvent(self), level=None) + + # level at which to log deactivation events + _deactivation_log_level = logging.ERROR + + def deactivate(self, exc, level=None): + # always log exceptions, even if we've already failed + self.log.store_exception(exc) + + if not (self.failed or self.status == basic.ObjectStatus.SUCCEEDED): + # only need to log and update on status change for still-active objs + if level is None: + level = self._deactivation_log_level # default level for child class + self.log.log(level, "Deactivated %s due to %r.", self.full_name, exc) + + # update status on self + self.status = basic.ObjectStatus.FAILED + if self._parent is not None: + # call handler on parent, which may change parent and/or siblings + self._parent.child_deactivation_handler(self, exc) + self._parent.child_status_update() + # update children (deactivate all) + for obj in self.iter_children(status_neq=basic.ObjectStatus.FAILED): + obj.deactivate(exceptions.PropagatedEvent(exc=exc, parent=self), level=None) diff --git a/src/util/path_utils.py b/src/util/path_utils.py new file mode 100644 index 000000000..757bd8998 --- /dev/null +++ b/src/util/path_utils.py @@ -0,0 +1,175 @@ +"""Utility functions for defining directory paths +""" +import os +from . import check_dir +from . import exit_handler +from . import Singleton +from . import NameSpace +from . import from_iter +from . import resolve_path +import shutil +from . import filesystem +import logging +_log = logging.getLogger(__name__) + + +class PathManagerBase(metaclass=Singleton): + """:class:`~util.Singleton` holding the root directories for all paths used + by the code. 
+ """ + WORK_DIR: str + OUTPUT_DIR: str + TEMP_DIR_ROOT: str + CODE_ROOT: str + OBS_DATA_ROOT: str + _unittest: bool = False + + def __init__(self, config: NameSpace = None, + env: dict = None, + unittest: bool = False, + new_work_dir: bool = True): + self._unittest = unittest + if self._unittest: + for path in ['CODE_ROOT', 'OBS_DATA_ROOT', + 'WORK_DIR', 'OUTPUT_DIR']: + setattr(self, path, 'TEST_' + path) + self.TEMP_DIR_ROOT = self.WORK_DIR + else: + # normal code path + self.CODE_ROOT = config.CODE_ROOT + assert os.path.isdir(self.CODE_ROOT) + + # set following explicitly: redundant, but keeps linter from complaining + if hasattr(config, "OBS_DATA_ROOT"): + self.OBS_DATA_ROOT = self._init_path('OBS_DATA_ROOT', config, env=env) + if 'MDTF_output' not in config.WORK_DIR: + self.WORK_DIR = os.path.join(self._init_path('WORK_DIR', config, env=env), + 'MDTF_output') + else: + self.WORK_DIR = os.path.join(self._init_path('WORK_DIR', config, env=env)) + + if not hasattr(config, 'OUTPUT_DIR'): + self.OUTPUT_DIR = self.WORK_DIR + elif len(config['OUTPUT_DIR']) < 1: + self.OUTPUT_DIR = self.WORK_DIR + else: + if 'MDTF_output' not in config.WORK_DIR: + self.OUTPUT_DIR = os.path.join(self._init_path('OUTPUT_DIR', config, env=env), + 'MDTF_output') + else: + self.OUTPUT_DIR = os.path.join(self._init_path('OUTPUT_DIR', config, env=env)) + + if new_work_dir: + self.WORK_DIR, ver = filesystem.bump_version( + self.WORK_DIR, extra_dirs=[self.OUTPUT_DIR]) + self.OUTPUT_DIR, _ = filesystem.bump_version(self.OUTPUT_DIR, new_v=ver) + + # set root directory for TempDirManager + if not getattr(self, 'TEMP_DIR_ROOT', ''): + if env is not None and 'MDTF_TMPDIR' in env: + self.TEMP_DIR_ROOT = env['MDTF_TMPDIR'] + else: + # default to writing temp files in working directory + self.TEMP_DIR_ROOT = self.WORK_DIR + + def _init_path(self, key, d, env=None): + if self._unittest: # use in unit testing only + return 'TEST_'+key + else: + # need to check existence in case we're being called directly + if not d.get(key, False): + _log.fatal(f"Error: {key} not initialized.") + exit_handler(code=1) + return resolve_path( + from_iter(d[key]), root_path=self.CODE_ROOT, + env_vars=env, + log=_log + ) + + +class PodPathManager(PathManagerBase): + POD_WORK_DIR: str + POD_OUTPUT_DIR: str + POD_OBS_DATA: str + POD_CODE_DIR: str + + def setup_pod_paths(self, pod_name: str): + """Check and create directories specific to this POD. 
+ """ + + self.POD_CODE_DIR = os.path.join(self.CODE_ROOT, 'diagnostics', pod_name) + self.POD_WORK_DIR = os.path.join(self.WORK_DIR, pod_name) + self.POD_OUTPUT_DIR = os.path.join(self.OUTPUT_DIR, pod_name) + if any(self.OBS_DATA_ROOT): + self.POD_OBS_DATA = os.path.join(self.OBS_DATA_ROOT, pod_name) + filesystem.check_dir(self.POD_WORK_DIR, 'POD_WORK_DIR', create=True) + filesystem.check_dir(self.POD_OUTPUT_DIR, 'POD_OUTPUT_DIR', create=True) + # OBS data are unique to POD, so the obs output is copied to the POD subdirectory + dirs = ('model/PS', 'obs/PS', 'obs/netCDF') + for d in dirs: + filesystem.check_dir(os.path.join(self.POD_WORK_DIR, d), create=True) + + +class ModelDataPathManager(PathManagerBase): + MODEL_DATA_ROOT: str + MODEL_DATA_DIR: dict + MODEL_WORK_DIR: dict + MODEL_OUTPUT_DIR: dict + + def __init__(self, config: NameSpace, + env=None, + unittest: bool = False, + new_work_dir: bool = False): + super().__init__(config, env, unittest, new_work_dir) + + if hasattr(config, "MODEL_DATA_ROOT"): + self.MODEL_DATA_ROOT = self._init_path('MODEL_DATA_ROOT', config, env=env) + self.MODEL_DATA_DIR = dict() + self.MODEL_OUTPUT_DIR = dict() + self.MODEL_WORK_DIR = dict() + else: + self.MODEL_DATA_ROOT = "" + + def setup_data_paths(self, case_list: NameSpace): + # define directory paths for multirun mode + # Each case directory is a subdirectory in wk_dir/pod_name + for case_name, case_dict in case_list.items(): + if case_dict.startdate in case_name and case_dict.enddate in case_name: + case_wk_dir = 'MDTF_{}'.format(case_name) + else: + startdate = case_dict.startdate.format(precision=1) + enddate = case_dict.enddate.format(precision=1) + case_wk_dir = 'MDTF_{}_{}_{}'.format(case_name, startdate, enddate) + # TODO: Remove refs to MODEL_DATA_ROOT when catalogs are implemented in + # older PODs + # Model data DIR retained for backwards compatibility + if len(self.MODEL_DATA_ROOT) > 1: + self.MODEL_DATA_DIR[case_name] = os.path.join(self.MODEL_DATA_ROOT, case_name) + filesystem.check_dir(self.MODEL_DATA_DIR[case_name], 'MODEL_DATA_DIR', create=False) + # Cases are located in a common POD directory + self.MODEL_WORK_DIR[case_name] = os.path.join(self.WORK_DIR, case_wk_dir) + self.MODEL_OUTPUT_DIR[case_name] = os.path.join(self.OUTPUT_DIR, case_wk_dir) + + filesystem.check_dir(self.MODEL_WORK_DIR[case_name], 'MODEL_WORK_DIR', create=True) + filesystem.check_dir(self.MODEL_OUTPUT_DIR[case_name], 'MODEL_OUTPUT_DIR', create=True) + + +def verify_paths(config, p): + # needs to be here, instead of PathManager, because we subclass it in + # NOAA_GFDL + keep_temp = config.get('keep_temp', False) + # clean out WORKING_DIR if we're not keeping temp files: + if os.path.exists(p.WORK_DIR) and not \ + (keep_temp or p.WORK_DIR == p.OUTPUT_DIR): + shutil.rmtree(p.WORK_DIR) + + try: + check_dirs = (('CODE_ROOT', False), ('MODEL_DATA_ROOT', True), ('WORK_DIR', True)) + if hasattr(config, 'OBS_DATA_ROOT'): + check_dirs.append('OBS_DATA_ROOT', False) + for dir_name, create_ in check_dirs: + check_dir(p, dir_name, create=create_) + except Exception as exc: + _log.fatal((f"Input settings for {dir_name} mis-specified (caught " + f"{repr(exc)}.)")) + exit_handler(code=1) diff --git a/src/util/processes.py b/src/util/processes.py index aaeeaf831..6921e9034 100644 --- a/src/util/processes.py +++ b/src/util/processes.py @@ -11,11 +11,15 @@ import logging _log = logging.getLogger(__name__) + class ExceptionPropagatingThread(threading.Thread): """Class to propagate exceptions raised in a child thread back to the 
caller thread when the child is join()ed. Adapted from ``__. """ + ret = None + exc = None + def run(self): self.ret = None self.exc = None @@ -54,7 +58,7 @@ def poll_command(command, shell=False, env=None): return rc -def run_command(command, env=None, cwd=None, timeout=0, dry_run=False, log=_log): +def run_command(command, env=None, cwd=None, timeout=0, log=_log): """Subprocess wrapper to facilitate running a single command without starting a shell. @@ -89,9 +93,6 @@ def _timeout_handler(signum, frame): if isinstance(command, str): command = shlex.split(command) cmd_str = ' '.join(command) - if dry_run: - log.info('DRY_RUN: call %s', cmd_str) - return proc = None pid = None retcode = 1 @@ -128,15 +129,17 @@ def _timeout_handler(signum, frame): else: return stdout.splitlines() -def run_shell_command(command, env=None, cwd=None, dry_run=False, log=_log): + +def run_shell_command(command, env=None, cwd=None, log=_log): """Subprocess wrapper to facilitate running shell commands. See documentation for :py:class:`~subprocess.Popen`. Args: - commands (list of str): List of commands to execute. + command (list of str): List of commands to execute. env (dict): Optional. Environment variables to set. cwd (str): Optional. Child processes' working directory. Default is `None`, which uses the current working directory. + log: log file Returns: List of str containing output that was written to stdout @@ -157,20 +160,16 @@ def run_shell_command(command, env=None, cwd=None, dry_run=False, log=_log): if not isinstance(command, str): command = ' '.join(command) - if dry_run: - log.info('DRY_RUN: call %s', command) - return proc = None pid = None retcode = 1 stderr = '' try: - proc = subprocess.Popen( - command, - shell=True, executable=bash_exec, - env=env, cwd=cwd, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines=True, bufsize=1 + proc = subprocess.Popen(command, shell=True, + executable=bash_exec, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=env, universal_newlines=True, + cwd=cwd, text=True, bufsize=1 ) pid = proc.pid (stdout, stderr) = proc.communicate() diff --git a/src/util/tests/test_basic.py b/src/util/tests/test_basic.py index ea7c15c93..692346e6d 100644 --- a/src/util/tests/test_basic.py +++ b/src/util/tests/test_basic.py @@ -1,8 +1,8 @@ import unittest -import unittest.mock as mock from src.util import basic as util from src.util import exceptions + class TestMDTFABCMeta(unittest.TestCase): def test_abstract_attribute(self): class Foo(metaclass=util.MDTFABCMeta): @@ -25,15 +25,18 @@ class GoodChildClass(Foo): class BadChildClass(Foo): pass + with self.assertRaises(NotImplementedError): b = BadChildClass() + class TestSingleton(unittest.TestCase): def test_singleton(self): # Can only be instantiated once class Temp1(util.Singleton): def __init__(self): self.foo = 0 + temp1 = Temp1() temp2 = Temp1() temp1.foo = 5 @@ -44,40 +47,42 @@ def test_singleton_reset(self): class Temp2(util.Singleton): def __init__(self): self.foo = 0 + temp1 = Temp2() temp1.foo = 5 temp1._reset() temp2 = Temp2() self.assertEqual(temp2.foo, 0) + class TestMultiMap(unittest.TestCase): def test_multimap_inverse(self): # test inverse map - temp = util.MultiMap({'a':1, 'b':2}) + temp = util.MultiMap({'a': 1, 'b': 2}) temp_inv = temp.inverse() self.assertIn(1, temp_inv) self.assertEqual(temp_inv[2], set(['b'])) def test_multimap_setitem(self): # test key addition and handling of duplicate values - temp = util.MultiMap({'a':1, 'b':2}) + temp = util.MultiMap({'a': 1, 'b': 2}) temp['c'] = 1 
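# Hypothetical call showing run_shell_command() above in use: the command
# string runs under bash, and stdout comes back as a list of lines (assuming
# the wrapper returns stdout.splitlines() the way run_command() does).
lines = run_shell_command("echo one && echo two", cwd="/tmp")
# lines == ["one", "two"] if the subprocess exits cleanly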
temp_inv = temp.inverse() self.assertIn(1, temp_inv) - self.assertCountEqual(temp_inv[1], set(['a','c'])) + self.assertCountEqual(temp_inv[1], set(['a', 'c'])) temp['b'] = 3 temp_inv = temp.inverse() self.assertNotIn(2, temp_inv) def test_multimap_delitem(self): # test item deletion - temp = util.MultiMap({'a':1, 'b':2}) + temp = util.MultiMap({'a': 1, 'b': 2}) del temp['b'] temp_inv = temp.inverse() self.assertNotIn(2, temp_inv) def test_multimap_add(self): - temp = util.MultiMap({'a':1, 'b':2, 'c':1}) + temp = util.MultiMap({'a': 1, 'b': 2, 'c': 1}) temp['a'].add(3) temp_inv = temp.inverse() self.assertIn(3, temp_inv) @@ -85,34 +90,35 @@ def test_multimap_add(self): temp['a'].add(2) temp_inv = temp.inverse() self.assertIn(2, temp_inv) - self.assertCountEqual(temp_inv[2], set(['a','b'])) + self.assertCountEqual(temp_inv[2], set(['a', 'b'])) def test_multimap_add_new(self): - temp = util.MultiMap({'a':1, 'b':2, 'c':1}) + temp = util.MultiMap({'a': 1, 'b': 2, 'c': 1}) temp['x'].add(2) temp_inv = temp.inverse() self.assertIn(2, temp_inv) - self.assertCountEqual(temp_inv[2], set(['b','x'])) + self.assertCountEqual(temp_inv[2], set(['b', 'x'])) def test_multimap_remove(self): - temp = util.MultiMap({'a':1, 'b':2, 'c':1}) + temp = util.MultiMap({'a': 1, 'b': 2, 'c': 1}) temp['c'].add(2) temp['c'].remove(1) temp_inv = temp.inverse() self.assertIn(2, temp_inv) - self.assertCountEqual(temp_inv[2], set(['b','c'])) + self.assertCountEqual(temp_inv[2], set(['b', 'c'])) self.assertIn(1, temp_inv) self.assertCountEqual(temp_inv[1], set(['a'])) + class TestWormDict(unittest.TestCase): def test_worm_normal_operation(self): foo = util.WormDict(a=1, b=2) - self.assertTrue(isinstance(foo, dict)) # should really be testing for MutableMapping + self.assertTrue(isinstance(foo, dict)) # should really be testing for MutableMapping self.assertEqual(foo['b'], 2) # all dicts are OrderedDicts starting with 3.7 - self.assertEqual(tuple(foo.keys()), ('a','b')) - self.assertEqual(tuple(foo.values()), (1,2)) - self.assertEqual(tuple(str(k) + str(v) for k,v in foo.items()), ('a1', 'b2')) + self.assertEqual(tuple(foo.keys()), ('a', 'b')) + self.assertEqual(tuple(foo.values()), (1, 2)) + self.assertEqual(tuple(str(k) + str(v) for k, v in foo.items()), ('a1', 'b2')) foo.update({'c': 3, 'd': 4}) self.assertEqual(foo['d'], 4) self.assertEqual(len(foo), 4) @@ -138,12 +144,12 @@ def test_worm_delete(self): def test_consistentD_normal_operation(self): foo = util.ConsistentDict(a=1, b=2) - self.assertTrue(isinstance(foo, dict)) # should really be testing for MutableMapping + self.assertTrue(isinstance(foo, dict)) # should really be testing for MutableMapping self.assertEqual(foo['b'], 2) # all dicts are OrderedDicts starting with 3.7 - self.assertEqual(tuple(foo.keys()), ('a','b')) - self.assertEqual(tuple(foo.values()), (1,2)) - self.assertEqual(tuple(str(k) + str(v) for k,v in foo.items()), ('a1', 'b2')) + self.assertEqual(tuple(foo.keys()), ('a', 'b')) + self.assertEqual(tuple(foo.values()), (1, 2)) + self.assertEqual(tuple(str(k) + str(v) for k, v in foo.items()), ('a1', 'b2')) foo.update({'c': 3, 'd': 4}) self.assertEqual(foo['d'], 4) self.assertEqual(len(foo), 4) @@ -241,6 +247,7 @@ def test_namespace_hash(self): self.assertIn(test, set_test) self.assertIn(test4, set_test) + class TestMDTFEnum(unittest.TestCase): def test_to_string(self): Dummy = util.MDTFEnum('Dummy', 'VALUE ANOTHER_VALUE') @@ -258,70 +265,50 @@ def test_eq_coercion(self): Dummy = util.MDTFEnum('Dummy', 'VALUE ANOTHER_VALUE') self.assertEqual(Dummy.VALUE, 
Dummy.VALUE) self.assertNotEqual(Dummy.ANOTHER_VALUE, Dummy.VALUE) - #self.assertEqual(Dummy.VALUE, 'value') - #self.assertEqual('value', Dummy.VALUE) - #self.assertNotEqual('another_value', Dummy.VALUE) - #self.assertNotEqual(Dummy.VALUE, 'another_value') + # self.assertEqual(Dummy.VALUE, 'value') + # self.assertEqual('value', Dummy.VALUE) + # self.assertNotEqual('another_value', Dummy.VALUE) + # self.assertNotEqual(Dummy.VALUE, 'another_value') - def test_int_enum(self): - Dummy = util.MDTFIntEnum('Dummy', 'ONE TWO THREE') - self.assertTrue(Dummy.ONE < Dummy.TWO) - self.assertTrue(Dummy.THREE >= Dummy.TWO) - self.assertNotEqual(Dummy.ONE, Dummy.TWO) class TestSpliceIntoList(unittest.TestCase): def test_splice_into_list_start(self): - list_ = ['a','b','c'] - ans = util.splice_into_list(list_, {'a':['a1']}) + list_ = ['a', 'b', 'c'] + ans = util.splice_into_list(list_, {'a': ['a1']}) self.assertEqual(ans, ['a', 'a1', 'b', 'c']) def test_splice_into_list_middle(self): - list_ = ['a','b','c'] - ans = util.splice_into_list(list_, {'b':['b1']}) + list_ = ['a', 'b', 'c'] + ans = util.splice_into_list(list_, {'b': ['b1']}) self.assertEqual(ans, ['a', 'b', 'b1', 'c']) def test_splice_into_list_end(self): - list_ = ['a','b','c'] - ans = util.splice_into_list(list_, {'c':['c1']}) + list_ = ['a', 'b', 'c'] + ans = util.splice_into_list(list_, {'c': ['c1']}) self.assertEqual(ans, ['a', 'b', 'c', 'c1']) def test_splice_into_list_multi(self): - list_ = ['a','b','a'] - ans = util.splice_into_list(list_, {'a':['a1'], 'c':['c1']}) + list_ = ['a', 'b', 'a'] + ans = util.splice_into_list(list_, {'a': ['a1'], 'c': ['c1']}) self.assertEqual(ans, ['a', 'a1', 'b', 'a', 'a1']) def test_splice_into_list_keyfn(self): - list_ = ['aaa','bXX','bYY','c','dXX','bZZ'] + list_ = ['aaa', 'bXX', 'bYY', 'c', 'dXX', 'bZZ'] key_fn = (lambda s: s[0]) - splice_d = {'a':['a1'], 'b':['b1'], 'd':['d1'],'g':['g1']} + splice_d = {'a': ['a1'], 'b': ['b1'], 'd': ['d1'], 'g': ['g1']} ans = util.splice_into_list(list_, splice_d, key_fn) self.assertEqual(ans, - ['aaa', 'a1', 'bXX', 'b1', 'bYY', 'b1', 'c', 'dXX', 'd1', 'bZZ', 'b1'] - ) + ['aaa', 'a1', 'bXX', 'b1', 'bYY', 'b1', 'c', 'dXX', 'd1', 'bZZ', 'b1'] + ) def test_splice_into_list_general(self): - list_ = ['a','b','b','c','d','b'] - splice_d = {'a':['a1','a2'], 'b':['b1'], 'd':['d1'],'g':['g1']} + list_ = ['a', 'b', 'b', 'c', 'd', 'b'] + splice_d = {'a': ['a1', 'a2'], 'b': ['b1'], 'd': ['d1'], 'g': ['g1']} ans = util.splice_into_list(list_, splice_d) self.assertEqual(ans, - ['a', 'a1', 'a2', 'b', 'b1', 'b', 'b1', 'c', 'd', 'd1', 'b', 'b1'] - ) - -class TestSerializeClass(unittest.TestCase): - def test_deserialize_builtin(self): - cls_ = util.deserialize_class('list') - self.assertEqual(cls_, list) - cls_ = util.deserialize_class('str') - self.assertEqual(cls_, str) - cls_ = util.deserialize_class('int') - self.assertEqual(cls_, int) - - def test_deserialize_user(self): - class Dummy(object): - pass - cls_ = util.deserialize_class('Dummy') - self.assertEqual(cls_, Dummy) -# --------------------------------------------------- + ['a', 'a1', 'a2', 'b', 'b1', 'b', 'b1', 'c', 'd', 'd1', 'b', 'b1'] + ) + if __name__ == '__main__': unittest.main() diff --git a/src/util/tests/test_dataclass.py b/src/util/tests/test_dataclass.py index bf055e0b9..d972be9a1 100644 --- a/src/util/tests/test_dataclass.py +++ b/src/util/tests/test_dataclass.py @@ -1,10 +1,9 @@ import unittest -import unittest.mock as mock import dataclasses import typing from src.util import basic, exceptions from src.util import 
dataclass as util -from src.util import datelabel as dt # only used to construct one test instance +from src.util import datelabel as dt # only used to construct one test instance class TestRegexPattern(unittest.TestCase): @@ -16,7 +15,7 @@ def test_regex_dataclass(self): ppat = util.RegexPattern(regex) @util.regex_dataclass(ppat) - class A(): + class A: foo: int bar: int @@ -31,12 +30,14 @@ class A(): self.assertEqual(b.foo, 3) self.assertEqual(b.bar, 4) + class TestRegexDataclassInheritance(unittest.TestCase): def test_initvar(self): grid_label_regex = util.RegexPattern(r""" g(?Pm?)(?P\d?) """, input_field="grid_label" - ) + ) + @util.regex_dataclass(grid_label_regex) class CMIP6_GridLabel(): grid_label: str = util.MANDATORY @@ -53,7 +54,8 @@ def __post_init__(self, global_mean=None): drs_directory_regex = util.RegexPattern(r""" /?(CMIP6/)?(?P\w+)/(?P\w+)/ """, input_field="directory" - ) + ) + @util.regex_dataclass(drs_directory_regex) class CMIP6_DRSDirectory(CMIP6_GridLabel): directory: str = "" @@ -76,9 +78,10 @@ def test_conflicts(self): parent1_regex = util.RegexPattern(r""" g(?Pm?)(?P\d?) """, input_field="parent1" - ) + ) + @util.regex_dataclass(parent1_regex) - class Parent1(): + class Parent1: parent1: str = util.MANDATORY global_mean: dataclasses.InitVar = "" grid_number: int = 0 @@ -93,9 +96,10 @@ def __post_init__(self, global_mean=None): parent2_regex = util.RegexPattern(r""" x(?P\d?)x(?P\w+)x """, input_field="parent2" - ) + ) + @util.regex_dataclass(parent2_regex) - class Parent2(): + class Parent2: parent2: str = util.MANDATORY grid_number: int = 0 spatial_avg: str = "" @@ -107,7 +111,8 @@ def __post_init__(self): child_regex = util.RegexPattern(r""" (?P\w+)/(?P\w+)/(?P\w+)/ """, input_field="directory" - ) + ) + @util.regex_dataclass(child_regex) class Child(Parent1, Parent2): directory: str = "" @@ -120,9 +125,9 @@ class Child(Parent1, Parent2): self.assertDictEqual( dataclasses.asdict(foo), {'parent2': 'x6xglobalx', 'grid_number': 6, 'spatial_avg': 'global_mean', - 'parent1': 'gm6', 'directory': 'bazinga/gm6/x6xglobalx/', - 'activity_id': 'bazinga', 'grid_label': 'gm6', - 'redundant_label': 'x6xglobalx'} + 'parent1': 'gm6', 'directory': 'bazinga/gm6/x6xglobalx/', + 'activity_id': 'bazinga', 'grid_label': 'gm6', + 'redundant_label': 'x6xglobalx'} ) # conflict in assignment to fields of same name in parent dataclasses with self.assertRaises(exceptions.DataclassParseError): @@ -130,6 +135,7 @@ class Child(Parent1, Parent2): with self.assertRaises(exceptions.DataclassParseError): _ = Child('bazinga/gm6/x6xNOT_THE_SAMEx/') + class TestMDTFDataclass(unittest.TestCase): def test_builtin_coerce(self): @util.mdtf_dataclass @@ -138,10 +144,10 @@ class Dummy(object): b: int = None c: list = None - dummy = Dummy(a="foo", b="5", c=(1,2,3)) + dummy = Dummy(a="foo", b="5", c=(1, 2, 3)) self.assertEqual(dummy.a, "foo") self.assertEqual(dummy.b, 5) - self.assertEqual(dummy.c, [1,2,3]) + self.assertEqual(dummy.c, [1, 2, 3]) def test_builtin_coerce_pre_postinit(self): @util.mdtf_dataclass @@ -291,14 +297,14 @@ class Dummy(object): dummy = Dummy(a=(1, 2), b=(1, 2)) self.assertEqual(dummy.a, dummy.b) self.assertEqual(dummy.c, 6) - dummy = Dummy(a=(1,2), b=(1,2), c=[1,2]) - self.assertEqual(dummy.c, [1,2]) - dummy = Dummy(a=(1,2), b=(1,2), c=5) + dummy = Dummy(a=(1, 2), b=(1, 2), c=[1, 2]) + self.assertEqual(dummy.c, [1, 2]) + dummy = Dummy(a=(1, 2), b=(1, 2), c=5) self.assertEqual(dummy.c, 5) - dummy = Dummy(a=(1,2), b=(1,2), d=[1,2]) - self.assertEqual(dummy.d, [1,2]) + dummy = 
Dummy(a=(1, 2), b=(1, 2), d=[1, 2]) + self.assertEqual(dummy.d, [1, 2]) with self.assertRaises(exceptions.DataclassParseError): - _ = Dummy(a=(1,2), b=(1,2), d=(1,2)) + _ = Dummy(a=(1, 2), b=(1, 2), d=(1, 2)) def test_typing_generics_2(self): def dummy_f(x: str) -> int: @@ -310,20 +316,21 @@ class Dummy(object): b: typing.TypeVar('foo') = None c: typing.Callable[[int], str] = util.NOTSET d: typing.Generic[typing.TypeVar('X'), typing.TypeVar('X')] = None - e: typing.Tuple[int, int] = (5,6) + e: typing.Tuple[int, int] = (5, 6) dummy = Dummy(a="a") self.assertEqual(dummy.a, "a") self.assertEqual(dummy.b, None) self.assertEqual(dummy.c, util.NOTSET) self.assertEqual(dummy.d, None) - self.assertEqual(dummy.e, (5,6)) - dummy = Dummy(a="a", b="bar", c=dummy_f, d="also_ignored", e=[1,2]) + self.assertEqual(dummy.e, (5, 6)) + dummy = Dummy(a="a", b="bar", c=dummy_f, d="also_ignored", e=[1, 2]) self.assertEqual(dummy.a, "a") self.assertEqual(dummy.b, "bar") self.assertEqual(dummy.c, dummy_f) self.assertEqual(dummy.d, "also_ignored") - self.assertEqual(dummy.e, (1,2)) + self.assertEqual(dummy.e, (1, 2)) + if __name__ == '__main__': unittest.main() diff --git a/src/util/tests/test_datelabel.py b/src/util/tests/test_datelabel.py index cc801c2e9..755fa3d75 100644 --- a/src/util/tests/test_datelabel.py +++ b/src/util/tests/test_datelabel.py @@ -1,44 +1,44 @@ -import os import unittest import datetime from src.util.datelabel import Date as dt from src.util.datelabel import DateRange as dt_range from src.util.datelabel import DateFrequency as dt_freq from src.util.datelabel import FXDateMin, FXDateMax, FXDateRange -from src.util.exceptions import FXDateException, MixedDatePrecisionException +from src.util.exceptions import FXDateException + class TestDate(unittest.TestCase): def test_init(self): - self.assertEqual(dt(2019), datetime.datetime(2019,1,1)) + self.assertEqual(dt(2019), datetime.datetime(2019, 1, 1)) self.assertEqual(dt(2019).precision, 1) - self.assertEqual(dt(2019,9,18), datetime.datetime(2019,9,18)) - self.assertEqual(dt(2019,9,18).precision, 3) + self.assertEqual(dt(2019, 9, 18), datetime.datetime(2019, 9, 18)) + self.assertEqual(dt(2019, 9, 18).precision, 3) def test_init_coerce(self): - self.assertEqual(dt(datetime.datetime(2019,1,1), 1), dt(2019)) - self.assertEqual(dt(datetime.datetime(2019,5,1), 2), dt(2019, 5)) - self.assertEqual(dt(datetime.datetime(2019,5,18), 2), dt(2019, 5)) + self.assertEqual(dt(datetime.datetime(2019, 1, 1), 1), dt(2019)) + self.assertEqual(dt(datetime.datetime(2019, 5, 1), 2), dt(2019, 5)) + self.assertEqual(dt(datetime.datetime(2019, 5, 18), 2), dt(2019, 5)) def test_init_epoch(self): # Make sure we're not doing platform-dependent stuff that breaks # outside of 1970-2038 - self.assertEqual(dt(1850), datetime.datetime(1850,1,1)) + self.assertEqual(dt(1850), datetime.datetime(1850, 1, 1)) self.assertEqual(dt(1850).precision, 1) - self.assertEqual(dt(2112,9,18), datetime.datetime(2112,9,18)) - self.assertEqual(dt(2112,9,18).precision, 3) - self.assertEqual(dt(datetime.datetime(1850,1,1), 1), dt(1850)) - self.assertEqual(dt(datetime.datetime(1850,5,1), 2), dt(1850, 5)) - self.assertEqual(dt(datetime.datetime(1850,5,18), 2), dt(1850, 5)) - self.assertEqual(dt(datetime.datetime(2112,1,1), 1), dt(2112)) - self.assertEqual(dt(datetime.datetime(2112,5,1), 2), dt(2112, 5)) - self.assertEqual(dt(datetime.datetime(2112,5,18), 2), dt(2112, 5)) + self.assertEqual(dt(2112, 9, 18), datetime.datetime(2112, 9, 18)) + self.assertEqual(dt(2112, 9, 18).precision, 3) + 
self.assertEqual(dt(datetime.datetime(1850, 1, 1), 1), dt(1850)) + self.assertEqual(dt(datetime.datetime(1850, 5, 1), 2), dt(1850, 5)) + self.assertEqual(dt(datetime.datetime(1850, 5, 18), 2), dt(1850, 5)) + self.assertEqual(dt(datetime.datetime(2112, 1, 1), 1), dt(2112)) + self.assertEqual(dt(datetime.datetime(2112, 5, 1), 2), dt(2112, 5)) + self.assertEqual(dt(datetime.datetime(2112, 5, 18), 2), dt(2112, 5)) def test_string_parsing(self): - self.assertEqual(dt('2019'), datetime.datetime(2019,1,1)) + self.assertEqual(dt('2019'), datetime.datetime(2019, 1, 1)) self.assertEqual(dt('2019').precision, 1) - self.assertEqual(dt('2019091814'), datetime.datetime(2019,9,18,14)) + self.assertEqual(dt('2019091814'), datetime.datetime(2019, 9, 18, 14)) self.assertEqual(dt('2019091814').precision, 4) - self.assertEqual(dt('2019-09-18'), datetime.datetime(2019,9,18)) + self.assertEqual(dt('2019-09-18'), datetime.datetime(2019, 9, 18)) self.assertEqual(dt('2019-09-18').precision, 3) def test_string_output(self): @@ -54,29 +54,29 @@ def test_string_output_iso(self): def test_comparisons_same(self): self.assertTrue(dt(2018) < dt(2019)) - self.assertTrue(dt(2019,9) > dt(2018)) - self.assertTrue(dt(2019,9) > dt(2019)) - self.assertTrue(dt(2019,1) >= dt(2019)) - self.assertTrue(dt(2019,1,1,12) <= dt(2019,2)) + self.assertTrue(dt(2019, 9) > dt(2018)) + self.assertTrue(dt(2019, 9) > dt(2019)) + self.assertTrue(dt(2019, 1) >= dt(2019)) + self.assertTrue(dt(2019, 1, 1, 12) <= dt(2019, 2)) def test_comparisons_parent(self): - self.assertTrue(dt(2018) < datetime.datetime(2019,1,1)) - self.assertTrue(dt(2019,9) > datetime.datetime(2018,12,25,23)) + self.assertTrue(dt(2018) < datetime.datetime(2019, 1, 1)) + self.assertTrue(dt(2019, 9) > datetime.datetime(2018, 12, 25, 23)) def test_comparisons_coerce(self): - self.assertTrue(dt(2018) <= datetime.date(2019,1,1)) - self.assertTrue(dt(2019,9) >= datetime.date(2018,12,25)) + self.assertTrue(dt(2018) <= datetime.date(2019, 1, 1)) + self.assertTrue(dt(2019, 9) >= datetime.date(2018, 12, 25)) def test_minmax(self): - test = [dt(2019,2), dt(2019,9), dt(2018), - dt(2019), dt(2019,1,1,12)] - self.assertEqual(max(test), dt(2019,9)) + test = [dt(2019, 2), dt(2019, 9), dt(2018), + dt(2019), dt(2019, 1, 1, 12)] + self.assertEqual(max(test), dt(2019, 9)) self.assertEqual(min(test), dt(2018)) def test_attributes(self): test = dt(2019) self.assertEqual(test.year, 2019) - test = dt(2019,9,18, 23) + test = dt(2019, 9, 18, 23) self.assertEqual(test.year, 2019) self.assertEqual(test.month, 9) self.assertEqual(test.day, 18) @@ -85,40 +85,40 @@ def test_attributes(self): def test_incr_decr(self): test = dt(2019) args = (test.lower, test.precision) - self.assertEqual(test.increment(*args), datetime.datetime(2020,1,1)) - self.assertEqual(test.decrement(*args), datetime.datetime(2018,1,1)) - test = dt(2019,1) + self.assertEqual(test.increment(*args), datetime.datetime(2020, 1, 1)) + self.assertEqual(test.decrement(*args), datetime.datetime(2018, 1, 1)) + test = dt(2019, 1) args = (test.lower, test.precision) - self.assertEqual(test.increment(*args), datetime.datetime(2019, 2,1)) - self.assertEqual(test.decrement(*args), datetime.datetime(2018, 12,1)) + self.assertEqual(test.increment(*args), datetime.datetime(2019, 2, 1)) + self.assertEqual(test.decrement(*args), datetime.datetime(2018, 12, 1)) # leap year - test = dt(2020,2,28) + test = dt(2020, 2, 28) args = (test.lower, test.precision) - self.assertEqual(test.increment(*args), datetime.datetime(2020,2,29)) - test = dt(2020,3,1,0) + 
self.assertEqual(test.increment(*args), datetime.datetime(2020, 2, 29)) + test = dt(2020, 3, 1, 0) args = (test.lower, test.precision) - self.assertEqual(test.decrement(*args), datetime.datetime(2020,2,29,23)) + self.assertEqual(test.decrement(*args), datetime.datetime(2020, 2, 29, 23)) class TestDateRange(unittest.TestCase): def test_string_parsing(self): self.assertEqual(dt_range('2010-2019'), - dt_range(dt(2010), dt(2019))) + dt_range(dt(2010), dt(2019))) self.assertEqual(dt_range('20100201-20190918'), - dt_range(dt(2010,2,1), dt(2019,9,18))) + dt_range(dt(2010, 2, 1), dt(2019, 9, 18))) def test_input_string_parsing(self): self.assertEqual(dt_range(2010, 2019), - dt_range(dt(2010), dt(2019))) + dt_range(dt(2010), dt(2019))) self.assertEqual(dt_range('20100201', '20190918'), - dt_range(dt(2010,2,1), dt(2019,9,18))) + dt_range(dt(2010, 2, 1), dt(2019, 9, 18))) def test_input_list_parsing(self): self.assertEqual( dt_range.from_date_span(dt(2015), dt(2010), dt(2019), dt(2017)), dt_range(2010, 2019)) self.assertEqual(dt_range(['20100201', '20190918']), - dt_range('20100201', '20190918')) + dt_range('20100201', '20190918')) def test_input_range_parsing(self): dtr1 = dt_range('20190101', '20190131') @@ -126,11 +126,11 @@ def test_input_range_parsing(self): dtr3 = dt_range('20190301', '20190331') self.assertEqual( dt_range.from_contiguous_span(dtr1, dtr2, dtr3), - dt_range(dt(2019,1,1), dt(2019,3,31)) + dt_range(dt(2019, 1, 1), dt(2019, 3, 31)) ) self.assertEqual( dt_range.from_contiguous_span(dtr3, dtr1, dtr2), - dt_range(dt(2019,1,1), dt(2019,3,31)) + dt_range(dt(2019, 1, 1), dt(2019, 3, 31)) ) with self.assertRaises(ValueError): _ = dt_range.from_contiguous_span(dtr3, dtr1) @@ -216,17 +216,17 @@ def test_more_overlaps(self): rng1 = dt_range('1980-1990') rng2 = [ (dt_range('19780501-19781225'), False, False, False, False), - (dt_range('19780501-19800101'), True, True, False, False), - (dt_range('19780501-19871225'), True, True, False, False), - (dt_range('19780501-19901231'), True, True, True, False), - (dt_range('19780501-19981225'), True, True, True, False), - (dt_range('19800101-19871225'), True, True, False, True), - (dt_range('19800101-19901231'), True, True, True, True), - (dt_range('19800101-19981225'), True, True, True, False), - (dt_range('19830501-19871225'), True, True, False, True), - (dt_range('19830501-19901231'), True, True, False, True), - (dt_range('19830501-19981225'), True, True, False, False), - (dt_range('19901231-19981225'), True, True, False, False), + (dt_range('19780501-19800101'), True, True, False, False), + (dt_range('19780501-19871225'), True, True, False, False), + (dt_range('19780501-19901231'), True, True, True, False), + (dt_range('19780501-19981225'), True, True, True, False), + (dt_range('19800101-19871225'), True, True, False, True), + (dt_range('19800101-19901231'), True, True, True, True), + (dt_range('19800101-19981225'), True, True, True, False), + (dt_range('19830501-19871225'), True, True, False, True), + (dt_range('19830501-19901231'), True, True, False, True), + (dt_range('19830501-19981225'), True, True, False, False), + (dt_range('19901231-19981225'), True, True, False, False), (dt_range('19930501-19981225'), False, False, False, False) ] for d in rng2: @@ -275,6 +275,7 @@ def test_start_end_properties(self): self.assertEqual(rng.start, dt('19800101')) self.assertEqual(rng.end, dt('19871225')) + class TestFXDates(unittest.TestCase): def test_compare(self): dtr = dt_range('19800101-19901231') @@ -340,6 +341,7 @@ def test_format(self): 
self.assertEqual(str(FXDateMax), "") self.assertEqual(str(FXDateRange), "") + class TestDateFrequency(unittest.TestCase): def test_string_parsing(self): self.assertEqual(dt_freq('1hr'), dt_freq(1, 'hr')) @@ -358,28 +360,28 @@ def test_from_struct(self): self.assertEqual(dt_freq.from_struct('2 weeks'), dt_freq(2, 'wk')) def test_comparisons_same_unit(self): - self.assertTrue(dt_freq(1,'hr') < dt_freq(2,'hr')) - self.assertTrue(dt_freq(5,'yr') > dt_freq(2,'yr')) - self.assertTrue(dt_freq(1,'se') <= dt_freq(1,'se')) - self.assertTrue(dt_freq(2,'mo') >= dt_freq(2,'mo')) - self.assertTrue(dt_freq(1,'hr') <= dt_freq(2,'hr')) + self.assertTrue(dt_freq(1, 'hr') < dt_freq(2, 'hr')) + self.assertTrue(dt_freq(5, 'yr') > dt_freq(2, 'yr')) + self.assertTrue(dt_freq(1, 'se') <= dt_freq(1, 'se')) + self.assertTrue(dt_freq(2, 'mo') >= dt_freq(2, 'mo')) + self.assertTrue(dt_freq(1, 'hr') <= dt_freq(2, 'hr')) def test_comparisons_different_unit(self): - self.assertTrue(dt_freq(3,'hr') < dt_freq(2,'dy')) - self.assertTrue(dt_freq(2,'yr') > dt_freq(6,'mo')) - self.assertTrue(dt_freq(7,'dy') <= dt_freq(1,'wk')) - self.assertTrue(dt_freq(24,'hr') >= dt_freq(1,'dy')) - self.assertTrue(dt_freq(1,'hr') <= dt_freq(2,'yr')) + self.assertTrue(dt_freq(3, 'hr') < dt_freq(2, 'dy')) + self.assertTrue(dt_freq(2, 'yr') > dt_freq(6, 'mo')) + self.assertTrue(dt_freq(7, 'dy') <= dt_freq(1, 'wk')) + self.assertTrue(dt_freq(24, 'hr') >= dt_freq(1, 'dy')) + self.assertTrue(dt_freq(1, 'hr') <= dt_freq(2, 'yr')) def test_minmax_same_unit(self): - test = [dt_freq(n,'hr') for n in [6, 1, 12, 36, 3]] + test = [dt_freq(n, 'hr') for n in [6, 1, 12, 36, 3]] self.assertEqual(max(test), dt_freq(36, 'hr')) self.assertEqual(min(test), dt_freq(1, 'hr')) def test_minmax_different_unit(self): - test = [dt_freq(n,'dy') for n in [2, 7, 1]] - test = test + [dt_freq(n,'hr') for n in [12, 36, 3]] - test = test + [dt_freq(n,'wk') for n in [3, 1]] + test = [dt_freq(n, 'dy') for n in [2, 7, 1]] + test = test + [dt_freq(n, 'hr') for n in [12, 36, 3]] + test = test + [dt_freq(n, 'wk') for n in [3, 1]] self.assertEqual(max(test), dt_freq(3, 'wk')) self.assertEqual(min(test), dt_freq(3, 'hr')) @@ -390,15 +392,16 @@ def test_fx_parsing(self): self.assertEqual(dt_freq('fx').format(), 'fx') def test_fx_comparisons(self): - self.assertTrue(dt_freq('fx') > dt_freq(2000,'yr')) - self.assertTrue(dt_freq('fx') > dt_freq(6,'dy')) - self.assertTrue(dt_freq('fx') > dt_freq(1,'wk')) + self.assertTrue(dt_freq('fx') > dt_freq(2000, 'yr')) + self.assertTrue(dt_freq('fx') > dt_freq(6, 'dy')) + self.assertTrue(dt_freq('fx') > dt_freq(1, 'wk')) def test_is_static(self): self.assertTrue(dt_freq('fx').is_static) - self.assertFalse(dt_freq(2000,'yr').is_static) - self.assertFalse(dt_freq(6,'dy').is_static) - self.assertFalse(dt_freq(1,'hr').is_static) + self.assertFalse(dt_freq(2000, 'yr').is_static) + self.assertFalse(dt_freq(6, 'dy').is_static) + self.assertFalse(dt_freq(1, 'hr').is_static) + if __name__ == '__main__': unittest.main() diff --git a/src/util/tests/test_filesystem.py b/src/util/tests/test_filesystem.py index 44d7a3a57..9ccbe58c1 100644 --- a/src/util/tests/test_filesystem.py +++ b/src/util/tests/test_filesystem.py @@ -4,8 +4,10 @@ import unittest.mock as mock from src.util import filesystem as util from src.util import exceptions + + class TestCheckDirs(unittest.TestCase): - @mock.patch('os.path.isdir', return_value = True) + @mock.patch('os.path.isdir', return_value=True) @mock.patch('os.makedirs') def test_check_dirs_found(self, mock_makedirs, mock_isdir): # 
exit function normally if all directories found @@ -16,7 +18,7 @@ def test_check_dirs_found(self, mock_makedirs, mock_isdir): self.fail() mock_makedirs.assert_not_called() - @mock.patch('os.path.isdir', return_value = False) + @mock.patch('os.path.isdir', return_value=False) @mock.patch('os.makedirs') def test_check_dirs_not_found(self, mock_makedirs, mock_isdir): # try to exit() if any directories not found @@ -24,7 +26,7 @@ def test_check_dirs_not_found(self, mock_makedirs, mock_isdir): util.check_dir('DUMMY/PATH/NAME', create=False) mock_makedirs.assert_not_called() - @mock.patch('os.path.isdir', return_value = False) + @mock.patch('os.path.isdir', return_value=False) @mock.patch('os.makedirs') def test_check_dirs_not_found_created(self, mock_makedirs, mock_isdir): # don't exit() and call os.makedirs if in create_if_nec @@ -34,6 +36,7 @@ def test_check_dirs_not_found_created(self, mock_makedirs, mock_isdir): self.fail() mock_makedirs.assert_called_once_with('DUMMY/PATH/NAME', exist_ok=False) + class TestBumpVersion(unittest.TestCase): @mock.patch('os.path.exists', return_value=False) def test_bump_version_noexist(self, mock_exists): @@ -57,12 +60,12 @@ def test_bump_version_getver(self, mock_exists): @mock.patch('os.path.exists', return_value=False) def test_bump_version_delver(self, mock_exists): for f in [ - ('AAA','AAA'), ('AAA.v1','AAA'), ('D/C/B/AA','D/C/B/AA'), - ('D/C.v1/B/AA/','D/C.v1/B/AA/'), ('D/C/B/AA.v23','D/C/B/AA'), - ('D/C3/B.v8/AA.v23/','D/C3/B.v8/AA/'), ('A.foo','A.foo'), - ('A.v23.foo','A.foo'), ('A.v23.bar.v45.foo','A.v23.bar.foo'), - ('D/C/A.foo','D/C/A.foo'), ('D/C.v1/A234.v3.foo','D/C.v1/A234.foo'), - ('D/C/A.v23.bar.v45.foo','D/C/A.v23.bar.foo') + ('AAA', 'AAA'), ('AAA.v1', 'AAA'), ('D/C/B/AA', 'D/C/B/AA'), + ('D/C.v1/B/AA/', 'D/C.v1/B/AA/'), ('D/C/B/AA.v23', 'D/C/B/AA'), + ('D/C3/B.v8/AA.v23/', 'D/C3/B.v8/AA/'), ('A.foo', 'A.foo'), + ('A.v23.foo', 'A.foo'), ('A.v23.bar.v45.foo', 'A.v23.bar.foo'), + ('D/C/A.foo', 'D/C/A.foo'), ('D/C.v1/A234.v3.foo', 'D/C.v1/A234.foo'), + ('D/C/A.v23.bar.v45.foo', 'D/C/A.v23.bar.foo') ]: f1, ver = util.bump_version(f[0], new_v=0) self.assertEqual(f1, f[1]) @@ -71,12 +74,12 @@ def test_bump_version_delver(self, mock_exists): @mock.patch('os.path.exists', return_value=False) def test_bump_version_setver(self, mock_exists): for f in [ - ('AAA','AAA.v42'), ('AAA.v1','AAA.v42'), ('D/C/B/AA','D/C/B/AA.v42'), - ('D/C.v1/B/AA/','D/C.v1/B/AA.v42/'), ('D/C/B/AA.v23','D/C/B/AA.v42'), - ('D/C3/B.v8/AA.v23/','D/C3/B.v8/AA.v42/'), ('A.foo','A.v42.foo'), - ('A.v23.foo','A.v42.foo'), ('A.v23.bar.v45.foo','A.v23.bar.v42.foo'), - ('D/C/A.foo','D/C/A.v42.foo'), ('D/C.v1/A.v23.foo','D/C.v1/A.v42.foo'), - ('D/C/A.v23.bar.v45.foo','D/C/A.v23.bar.v42.foo') + ('AAA', 'AAA.v42'), ('AAA.v1', 'AAA.v42'), ('D/C/B/AA', 'D/C/B/AA.v42'), + ('D/C.v1/B/AA/', 'D/C.v1/B/AA.v42/'), ('D/C/B/AA.v23', 'D/C/B/AA.v42'), + ('D/C3/B.v8/AA.v23/', 'D/C3/B.v8/AA.v42/'), ('A.foo', 'A.v42.foo'), + ('A.v23.foo', 'A.v42.foo'), ('A.v23.bar.v45.foo', 'A.v23.bar.v42.foo'), + ('D/C/A.foo', 'D/C/A.v42.foo'), ('D/C.v1/A.v23.foo', 'D/C.v1/A.v42.foo'), + ('D/C/A.v23.bar.v45.foo', 'D/C/A.v23.bar.v42.foo') ]: f1, ver = util.bump_version(f[0], new_v=42) self.assertEqual(f1, f[1]) @@ -108,6 +111,7 @@ def test_bump_version_setver(self, mock_exists): # self.assertEqual(f1, f[1]) # self.assertEqual(ver, f[2]) + class TestJSONC(unittest.TestCase): def test_parse_json_basic(self): s = """{ @@ -122,15 +126,15 @@ def test_parse_json_basic(self): } """ d = util.parse_json(s) - 
self.assertEqual(set(d.keys()), set(['a','b','c','d','e'])) + self.assertEqual(set(d.keys()), {'a', 'b', 'c', 'd', 'e'}) self.assertEqual(d['a'], "test_string") self.assertEqual(d['b'], 3) self.assertEqual(d['c'], False) self.assertEqual(len(d['d']), 3) - self.assertEqual(d['d'], [1,2,3]) - self.assertEqual(set(d['e'].keys()), set(['aa','bb'])) + self.assertEqual(d['d'], [1, 2, 3]) + self.assertEqual(set(d['e'].keys()), {'aa', 'bb'}) self.assertEqual(len(d['e']['aa']), 3) - self.assertEqual(d['e']['aa'], [4,5,6]) + self.assertEqual(d['e']['aa'], [4, 5, 6]) self.assertEqual(d['e']['bb'], True) def test_parse_json_comments(self): @@ -150,7 +154,7 @@ def test_parse_json_comments(self): """ d = util.parse_json(s) - self.assertEqual(set(d.keys()), set(['a','b // c','e','f'])) + self.assertEqual(set(d.keys()), {'a', 'b // c', 'e', 'f'}) self.assertEqual(d['a'], 1) self.assertEqual(d['b // c'], "// d x ////") self.assertEqual(d['e'], False) @@ -244,6 +248,7 @@ def test_strip_comments_quote_escape(self): ('"foo": bar\\"ba', [0]) ) + class TestDoubleBraceTemplate(unittest.TestCase): def sub(self, template_text, template_dict=dict()): tmp = util._DoubleBraceTemplate(template_text) @@ -262,25 +267,25 @@ def test_replace_2(self): self.assertEqual( self.sub("asdf\t{{\t foo \n\t }}baz", {'foo': 'bar'}), "asdf\tbarbaz" - ) + ) def test_replace_3(self): self.assertEqual( self.sub( "{{FOO}}\n{{ foo }}asdf\t{{\t FOO \n\t }}baz_{{foo}}", - {'foo': 'bar', 'FOO':'BAR'} + {'foo': 'bar', 'FOO': 'BAR'} ), "BAR\nbarasdf\tBARbaz_bar" - ) + ) def test_replace_4(self): self.assertEqual( self.sub( "]{ {{_F00}}\n{{ f00 }}as{ { }\n.d'f\t{{\t _F00 \n\t }}ba} {[z_{{f00}}", - {'f00': 'bar', '_F00':'BAR'} + {'f00': 'bar', '_F00': 'BAR'} ), "]{ BAR\nbaras{ { }\n.d'f\tBARba} {[z_bar" - ) + ) def test_ignore_1(self): self.assertEqual(self.sub("{{goo}}", {'foo': 'bar'}), "{{goo}}") @@ -289,16 +294,16 @@ def test_ignore_2(self): self.assertEqual( self.sub("asdf\t{{\t goo \n\t }}baz", {'foo': 'bar'}), "asdf\t{{\t goo \n\t }}baz" - ) + ) def test_ignore_3(self): self.assertEqual( self.sub( "{{FOO}}\n{{ goo }}asdf\t{{\t FOO \n\t }}baz_{{goo}}", - {'foo': 'bar', 'FOO':'BAR'} + {'foo': 'bar', 'FOO': 'BAR'} ), "BAR\n{{ goo }}asdf\tBARbaz_{{goo}}" - ) + ) def test_nomatch_1(self): self.assertEqual(self.sub("{{foo", {'foo': 'bar'}), "{{foo") @@ -307,18 +312,19 @@ def test_nomatch_2(self): self.assertEqual( self.sub("asdf\t{{\t foo \n\t }baz", {'foo': 'bar'}), "asdf\t{{\t foo \n\t }baz" - ) + ) def test_nomatch_3(self): self.assertEqual( self.sub( "{{FOO\n{{ foo }asdf}}\t{{\t FOO \n\t }}baz_{{foo}}", - {'foo': 'bar', 'FOO':'BAR'} + {'foo': 'bar', 'FOO': 'BAR'} ), "{{FOO\n{{ foo }asdf}}\tBARbaz_bar" - ) + ) + # --------------------------------------------------- if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/src/util/tests/test_processes.py b/src/util/tests/test_processes.py index cab066a76..8577edcba 100644 --- a/src/util/tests/test_processes.py +++ b/src/util/tests/test_processes.py @@ -1,31 +1,31 @@ import unittest -import unittest.mock as mock from src.util import processes as util + class TestSubprocessInteraction(unittest.TestCase): def test_run_shell_commands_stdout1(self): - input = 'echo "foo"' - out = util.run_shell_command(input) + in_args = ['echo "foo"'] + out = util.run_shell_command(in_args) self.assertEqual(len(out), 1) self.assertEqual(out[0], 'foo') def test_run_shell_commands_stdout2(self): - input = 'echo "foo" && echo "bar"' - out = util.run_shell_command(input) 
+ in_args = ['echo "foo" && echo "bar"'] + out = util.run_shell_command(in_args) self.assertEqual(len(out), 2) self.assertEqual(out[0], 'foo') self.assertEqual(out[1], 'bar') def test_run_shell_commands_exitcode(self): - input = 'echo "foo"; false' + in_args = ['echo "foo"; false'] with self.assertRaises(Exception): # I couldn't get this to catch MDTFCalledProcessError specifically, # maybe because it takes args? - util.run_shell_command(input) + util.run_shell_command(in_args) def test_run_shell_commands_envvars(self): - input = 'echo $FOO; export FOO="baz"; echo $FOO' - out = util.run_shell_command(input, env={'FOO':'bar'}) + in_args = ['echo $FOO; export FOO="baz"; echo $FOO'] + out = util.run_shell_command(in_args, env={'FOO': 'bar'}) self.assertEqual(len(out), 2) self.assertEqual(out[0], 'bar') self.assertEqual(out[1], 'baz') @@ -51,11 +51,12 @@ def test_run_command_stdout1(self): self.assertEqual(out[0], '"foo"') def test_run_command_exitcode(self): - input = ['exit', '1'] + in_args = ['exit', '1'] with self.assertRaises(Exception): # I couldn't get this to catch MDTFCalledProcessError specifically, # maybe because it takes args? - util.run_command(input) + util.run_command(in_args) + # --------------------------------------------------- diff --git a/src/varlist_util.py b/src/varlist_util.py index 2e069fb8e..90c33c734 100644 --- a/src/varlist_util.py +++ b/src/varlist_util.py @@ -1,12 +1,34 @@ """Classes that define varlist coordinates and other attributes """ import dataclasses as dc +import itertools import typing -from abc import ABC -from src import util, data_model +from src import util +from src import data_model +from src import translation +import os +import logging -# The definitions and classes in the varlist_util module are separate from the -# diagnostic module to prevent circular module imports +_log = logging.getLogger(__name__) + + +# --------------VarlistEntry--------------------- + +VarlistEntryRequirement = util.MDTFEnum( + 'VarlistEntryRequirement', + 'REQUIRED OPTIONAL ALTERNATE AUX_COORDINATE', + module=__name__ +) + +VarlistEntryRequirement.__doc__ = """ +:class:`util.MDTFEnum` used to track whether the DataSource is required to +provide data for the :class:`VarlistEntry`. +""" + +_coord_env_var_suffix = '_coord' +_coord_bounds_env_var_suffix = '_bnds' +_var_name_env_var_suffix = '_var' +_file_env_var_suffix = '_FILE' PodDataFileFormat = util.MDTFEnum( 'PodDataFileFormat', @@ -18,7 +40,7 @@ @util.mdtf_dataclass -class _VarlistGlobalSettings(object): +class _VarlistGlobalSettings: format: PodDataFileFormat = \ dc.field(default=PodDataFileFormat.ANY_NETCDF_CLASSIC, metadata={'query': True}) rename_variables: bool = False @@ -27,8 +49,8 @@ class _VarlistGlobalSettings(object): @util.mdtf_dataclass -class _VarlistTimeSettings(object): - frequency: util.DateFrequency = \ +class _VarlistTimeSettings: + frequency: util.DateFrequency = \ dc.field(default=util.NOTSET, metadata={'query': True}) min_frequency: util.DateFrequency = \ dc.field(default=util.NOTSET, metadata={'query': True}) @@ -39,7 +61,371 @@ class _VarlistTimeSettings(object): @util.mdtf_dataclass -class VarlistSettings(_VarlistGlobalSettings, _VarlistTimeSettings): +class VarlistEntryBase(metaclass=util.MDTFABCMeta): + """Base class for VarlistEntry + + Attributes: + use_exact_name: see docs + env_var: Name of env var which is set to the variable's name in the + provided dataset. + path_variable: Name of env var containing path(s) to local data. + dest_path: Path(s) to local data. 
+ alternates: List of lists of VarlistEntries. + translation: :class:`core.TranslatedVarlistEntry`, populated by DataSource. + data: dict mapping experiment_keys to DataKeys. Populated by DataSource. + """ + + def __init_subclass__(cls): + required_class_variables = [ + 'use_exact_name', + 'env_var', + 'requirement', + 'alternates', + 'translation', + 'data', + '_deactivation_log_level' + ] + for var in required_class_variables: + if not hasattr(cls, var): + raise NotImplementedError( + f'Class {cls} lacks required `{var}` class attribute' + ) + + def __post_init__(self): + pass + + @property + def _children(self): + pass + + @property + def name_in_model(self): + pass + + @classmethod + def from_struct(cls): + pass + + def iter_alternates(self): + + def _iter_alternates(): + pass + + @staticmethod + def alternates_str(alt_list): + pass + + def debug_str(self): + pass + + def iter_data_keys(self): + pass + + def deactivate_data_key(self): + pass + + @property + def local_data(self): + pass + + def query_attrs(self): + def iter_query_attrs(): + pass + + +@util.mdtf_dataclass +class VarlistEntry(VarlistEntryBase, util.MDTFObjectBase, data_model.DMVariable, + _VarlistGlobalSettings, util.VarlistEntryLoggerMixin): + """Class to describe data for a single variable requested by a POD. + Corresponds to list entries in the "varlist" section of the POD's + settings.jsonc file. + + Two VarlistEntries are equal (as determined by the ``__eq__`` method, which + compares fields without ``compare=False``) if they specify the same data + product, ie if the same output file from the preprocessor can be symlinked + to two different locations. + + Attributes: + use_exact_name: see docs + env_var: Name of env var which is set to the variable's name in the + provided dataset. + path_variable: Name of env var containing path to local data. + dest_path: Path to local data. + alternates: List of lists of VarlistEntries. + translation: :class:`core.TranslatedVarlistEntry`, populated by DataSource. + data: dict mapping experiment_keys to DataKeys. Populated by DataSource. 
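+
+        A minimal sketch of the equality semantics (``v1`` and ``v2`` are
+        hypothetical entries requesting the same variable)::
+
+            # env_var is declared below with compare=False, so it is
+            # ignored by the generated __eq__:
+            v2.env_var = "PR_ALT_var"
+            assert v1 == v2   # still the same data product
+
+        so the preprocessor may symlink one output file to both destinations.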
+ """ + # _id = util.MDTF_ID() # fields inherited from core.MDTFObjectBase + # name: str + # _parent: object + # log = util.MDTFObjectLogger + # status: ObjectStatus + # standard_name: str # fields inherited from data_model.DMVariable + # units: Units + # dims: list + # scalar_coords: list + # modifier: str + # env_vars: util.WormDict + use_exact_name: bool = False + env_var: str = dc.field(default="", compare=False) + path_variable: str = dc.field(default="", compare=False) + realm: str = dc.field(default="", compare=False) + dest_path: str = "" + requirement: VarlistEntryRequirement = dc.field( + default=VarlistEntryRequirement.REQUIRED, compare=False + ) + alternates: list = dc.field(default_factory=list, compare=False) + translation: typing.Any = dc.field(default=None, compare=False) + data: util.ConsistentDict = dc.field(default_factory=util.ConsistentDict, + compare=False) + _deactivation_log_level = logging.INFO # default log level for failure + associated_files: dict + + status: util.ObjectStatus = dc.field(default=util.ObjectStatus.NOTSET, compare=False) + name: str = util.MANDATORY + _parent: typing.Any = dc.field(default=util.MANDATORY, compare=False) + + def __post_init__(self, coords=None): + # set up log (VarlistEntryLoggerMixin) + self.log = util.MDTFObjectLogger.get_logger(self._log_name) + self.init_log() + data_model.DMVariable.__post_init__(self, coords) + + # (re)initialize mutable fields here so that if we copy VE (eg with .replace) + # the fields on the copy won't point to the same object as the fields on + # the original. + self.translation = None + self.data: util.ConsistentDict + # activate required vars + if self.status == util.ObjectStatus.NOTSET: + if self.requirement == VarlistEntryRequirement.REQUIRED: + self.status = util.ObjectStatus.ACTIVE + else: + self.status = util.ObjectStatus.INACTIVE + + # env_vars + if not self.env_var: + self.env_var = self.name + _var_name_env_var_suffix + + if not self.path_variable: + self.path_variable = self.name.upper() + _file_env_var_suffix + # self.alternates is either [] or a list of nonempty lists of VEs + if self.alternates: + if not isinstance(self.alternates[0], list): + self.alternates = [self.alternates] + self.alternates = [vs for vs in self.alternates if vs] + + def dims(self): + pass + + def scalar_coords(self): + pass + + def is_scalar(self): + pass + + @property + def _log_name(self): + if self._parent is None: + return util.OBJ_LOG_ROOT + else: + _log_name = f"{self.name}_{self._id}".replace('.', '_') + return f"{self._parent._log_name}.{_log_name}" + + @property + def _children(self): + """Iterable of child objects associated with this object.""" + return [] # leaves of object hierarchy + + @property + def name_in_model(self): + if self.translation and self.translation.name: + return self.translation.name + else: + return "(not translated)" + # raise ValueError(f"Translation not defined for {self.name}.") + + @classmethod + def from_struct(cls, global_settings_d, dims_d, name, parent, **kwargs): + """Instantiate from a struct in the varlist section of a POD's + settings.jsonc. 
+ """ + new_kw = global_settings_d.copy() + new_kw['coords'] = [] + + if 'dimensions' not in kwargs: + raise ValueError(f"No dimensions specified for Varlist entry {name}.") + # validate: check for duplicate coord names + scalars = kwargs.get('scalar_coordinates', dict()) + seen = set() + dupe_names = set(x for x + in itertools.chain(kwargs['dimensions'], scalars.keys()) + if x in seen or seen.add(x)) + if dupe_names: + raise ValueError((f"Repeated coordinate names {list(dupe_names)} in " + f"Varlist entry for {name}.")) + + # add dimensions for each variable in POD settings file, and check that they + # match the definitions in the "dimensions" section + for d_name in kwargs.pop('dimensions'): + if d_name not in dims_d: + raise ValueError((f"Unknown dimension name {d_name} in varlist " + f"entry for {name}.")) + new_kw['coords'].append(dims_d[d_name]) + + # add scalar coords + if 'scalar_coordinates' in kwargs: + for d_name, scalar_val in kwargs.pop('scalar_coordinates').items(): + if d_name not in dims_d: + raise ValueError((f"Unknown dimension name {d_name} in varlist " + f"entry for {name}.")) + new_kw['coords'].append(dims_d[d_name].make_scalar(scalar_val)) + filter_kw = util.filter_dataclass(kwargs, cls, init=True) + obj = cls(name=name, _parent=parent, **new_kw, **filter_kw) + # specialize time coord + time_kw = util.filter_dataclass(kwargs, _VarlistTimeSettings) + if time_kw: + obj.change_coord('T', None, **time_kw) + return obj + + def set_env_vars(self): + """Get env var definitions for: + + - The path to the preprocessed data file for this variable, + - The name for this variable in that data file, + - The names for all of this variable's coordinate axes in that file, + - The names of the bounds variables for all of those coordinate + dimensions, if provided by the data. + + """ + if self.status != util.ObjectStatus.SUCCEEDED: + # Signal to POD's code that vars are not provided by setting + # variable to the empty string. + self.env_vars = {self.env_var: "", self.path_variable: "", "realm": ""} + + assert self.dest_path, "dest_path not defined" + self.env_vars = util.WormDict() + + assoc_dict = ( + {self.name.upper() + "_ASSOC_FILES": self.associated_files} + if isinstance(self.associated_files, str) + else {} + ) + + self.env_vars.update({ + self.env_var: self.name_in_model, + self.path_variable: self.dest_path, + "realm": self.realm, + **assoc_dict + }) + for ax, dim in self.dim_axes.items(): + trans_dim = self.translation.dim_axes[ax] + self.env_vars[dim.name + _coord_env_var_suffix] = trans_dim.name + if trans_dim.has_bounds: + self.env_vars[dim.name + _coord_bounds_env_var_suffix] = trans_dim.bounds + + def iter_alternates(self): + """Breadth-first traversal of "sets" of alternate VarlistEntries, + alternates for those alternates, etc. ("Sets" is in quotes because + they're implemented as lists here, since VarlistEntries aren't immutable.) + + This is a "deep" iterator, yielding alternates of alternates, + alternates of those, ... etc. until variables with no alternates are + encountered or all variables have been yielded. In addition, it yields + the "sets" of alternates and not the VarlistEntries themselves. 
+ """ + + def _iter_alternates(): + stack = [[self]] + already_encountered = [] + while stack: + alt_v_set = stack.pop(0) + if alt_v_set not in already_encountered: + yield alt_v_set + already_encountered.append(alt_v_set) + for ve in alt_v_set: + for alt_v_set_of_ve in ve.alternates: + if alt_v_set_of_ve not in already_encountered: + stack.append(alt_v_set_of_ve) + + # first value yielded by _iter_alternates is the var itself, so drop + # that and then start by returning var's alternates + iterator_ = iter(_iter_alternates()) + try: + next(iterator_) + except StopIteration: + # should never get here, for above reason + yield from [] + yield from iterator_ + + @staticmethod + def alternates_str(alt_list): + return "[{}]".format(', '.join(v.full_name for v in alt_list)) + + def debug_str(self): + """String representation with more debugging information. + """ + + def _format(v): + str_ = str(v)[1:-1] + status_str = f"{v.status.name.lower()}" + status_str += (f" ({type(v.last_exception).__name__})" if v.failed else '') + if getattr(v, 'translation', None) is not None: + trans_str = str(v.translation) + trans_str = trans_str.replace("<", "'").replace(">", "'") + else: + trans_str = "(not translated)" + return (f"<{str_}; {status_str}, {v.requirement})\n" + f"\tName in data source: {trans_str}") + + s = _format(self) + for i, altvs in enumerate(self.iter_alternates()): + s += f"\n\tAlternate set #{i + 1}: {self.alternates_str(altvs)}" + return s + + def iter_associated_files_keys(self, status=None, status_neq=None): + """Yield :class:`~data_manager.DataKeyBase`\s + from v's *associated_files* dict, filtering out those DataKeys + that have beeneliminated via previous failures in fetching or preprocessing. + """ + iter_ = self.associated_files.values() + if status is not None: + iter_ = filter((lambda x: x.status == status), iter_) + elif status_neq is not None: + iter_ = filter((lambda x: x.status != status_neq), iter_) + yield from list(iter_) + + def iter_data_keys(self, status=None, status_neq=None): + """Yield :class:`~data_manager.DataKeyBase`\s + from v's *data* dict, filtering out those DataKeys that have been + eliminated via previous failures in fetching or preprocessing. + """ + iter_ = self.data.values() + if status is not None: + iter_ = filter((lambda x: x.status == status), iter_) + elif status_neq is not None: + iter_ = filter((lambda x: x.status != status_neq), iter_) + yield from list(iter_) + + @property + def local_data(self): + """Return sorted list of local file paths corresponding to the selected + experiment. + """ + + local_paths = set([]) + for d_key in self.iter_data_keys(status=util.ObjectStatus.ACTIVE): + local_paths.update(d_key.local_data) + local_paths = sorted(local_paths) + if not local_paths: + raise util.DataRequestError((f"local_data property on {self.full_name} " + "empty after fetch.")) + return local_paths + + +@util.mdtf_dataclass +class VarlistSettings(_VarlistGlobalSettings, + _VarlistTimeSettings): """Class to describe options affecting all variables requested by this POD. Corresponds to the "data" section of the POD's settings.jsonc file. """ @@ -53,16 +439,11 @@ def time_settings(self): @util.mdtf_dataclass -class VarlistCoordinateMixin(object): +class VarlistCoordinate(data_model.DMCoordinate): """Base class to describe a single dimension (in the netcdf data model sense) - used by one or more variables. Corresponds to list entries in the - "dimensions" section of the POD's settings.jsonc file. 
- """ - need_bounds: bool = False - - -@util.mdtf_dataclass -class VarlistCoordinate(data_model.DMCoordinate, VarlistCoordinateMixin): + used by one or more variables. Corresponds to list entries in the + "dimensions" section of the POD's settings.jsonc file. + """ # name: str # fields from data_model.DMCoordinate # standard_name: str # units: units.Units @@ -72,30 +453,27 @@ class VarlistCoordinate(data_model.DMCoordinate, VarlistCoordinateMixin): # need_bounds: bool # fields from VarlistCoordinateMixin # name_in_model: str # bounds_in_model: str + need_bounds: bool = False pass @util.mdtf_dataclass -class VarlistLongitudeCoordinate(data_model.DMLongitudeCoordinate, - VarlistCoordinateMixin): +class VarlistXCoordinate(data_model.DMXCoordinate): range: tuple = None @util.mdtf_dataclass -class VarlistLatitudeCoordinate(data_model.DMLatitudeCoordinate, - VarlistCoordinateMixin): +class VarlistYCoordinate(data_model.DMYCoordinate): range: tuple = None @util.mdtf_dataclass -class VarlistVerticalCoordinate(data_model.DMVerticalCoordinate, - VarlistCoordinateMixin): +class VarlistVerticalCoordinate(data_model.DMVerticalCoordinate): pass @util.mdtf_dataclass -class VarlistPlaceholderTimeCoordinate(data_model.DMGenericTimeCoordinate, - VarlistCoordinateMixin): +class VarlistPlaceholderTimeCoordinate(data_model.DMGenericTimeCoordinate): frequency: typing.Any = "" min_frequency: typing.Any = "" max_frequency: typing.Any = "" @@ -107,6 +485,145 @@ class VarlistPlaceholderTimeCoordinate(data_model.DMGenericTimeCoordinate, @util.mdtf_dataclass -class VarlistTimeCoordinate(_VarlistTimeSettings, data_model.DMTimeCoordinate, - VarlistCoordinateMixin, ABC): +class VarlistTimeCoordinate(_VarlistTimeSettings, + data_model.DMTimeCoordinate): pass + + +class Varlist(data_model.DMDataSet): + """Class to perform bookkeeping for the model variables requested by a + single POD for multiple cases/ensemble members + """ + + def find_var(self, v): + """If a variable matching *v* is already present in the Varlist, return + (a reference to) it (so that we don't try to add duplicates), otherwise + return None. + """ + for vv in self.iter_vars(): + if v == vv: + return vv + return None + + def setup_var(self, + model_paths: util.ModelDataPathManager, + case_name: str, + v: VarlistEntry, + data_convention: str, + date_range: util.DateRange): + """Update VarlistEntry fields with information that only becomes + available after DataManager and Diagnostic have been configured (ie, + only known at runtime, not from settings.jsonc.) + + Could arguably be moved into VarlistEntry's init, at the cost of + dependency inversion. + """ + translate = translation.VariableTranslator().get_convention(data_convention) + if v.T is not None: + v.change_coord( + 'T', + new_class={ + 'self': VarlistTimeCoordinate, + 'range': util.DateRange, + 'frequency': util.DateFrequency + }, + range=date_range, + calendar=util.NOTSET, + units=util.NOTSET + ) + v.dest_path = self.variable_dest_path(model_paths, case_name, v) + try: + trans_v = translate.translate(v) + v.translation = trans_v + # copy preferred gfdl post-processing component during translation + if hasattr(trans_v, "component"): + v.component = trans_v.component + except KeyError as exc: + # can happen in normal operation (eg. precip flux vs. 
rate) + chained_exc = util.PodConfigEvent((f"Deactivating {v.full_name} due to " + f"variable name translation: {str(exc)}.")) + # store but don't deactivate, because preprocessor.edit_request() + # may supply alternate variables + v.log.store_exception(chained_exc) + except Exception as exc: + chained_exc = util.chain_exc(exc, f"translating name of {v.full_name}.", + util.PodConfigError) + # store but don't deactivate, because preprocessor.edit_request() + # may supply alternate variables + v.log.store_exception(chained_exc) + # set the VarlistEntry env_vars (required for backwards compatibility with first-gen PODs) + v.set_env_vars() + + def variable_dest_path(self, + model_paths: util.ModelDataPathManager, + case_name: str, + var: VarlistEntry): + """Returns the absolute path of the POD's preprocessed, local copy of + the file containing the requested dataset. Files not following this + convention won't be found by the POD. + """ + if var.is_static: + f_name = f"{case_name}.{var.name}.static.nc" + return os.path.join(model_paths.MODEL_WORK_DIR[case_name], f_name) + else: + freq = var.T.frequency.format_local() + f_name = f"{case_name}.{var.name}.{freq}.nc" + return os.path.join(model_paths.MODEL_WORK_DIR[case_name], freq, f_name) + + @classmethod + def from_struct(cls, parent): + """Parse the "dimensions", "data" and "varlist" sections of the POD's + settings.jsonc file when instantiating a new :class:`Diagnostic` object. + + Args: + parent: instance of the parent class object (pod_setup.PodObject) + + Returns: + A :class:`Varlist` instance containing the :class:`VarlistEntry` + objects defined in the settings file. + """ + + def _pod_dimension_from_struct(name, dd, v_settings): + class_dict = { + 'X': VarlistXCoordinate, + 'Y': VarlistYCoordinate, + 'Z': VarlistVerticalCoordinate, + 'T': VarlistPlaceholderTimeCoordinate, + 'OTHER': VarlistCoordinate + } + try: + return data_model.coordinate_from_struct( + dd, class_dict=class_dict, name=name, + **v_settings.time_settings + ) + except Exception: + raise ValueError(f"Couldn't parse dimension entry for {name}: {dd}") + + def _iter_shallow_alternates(var): + """Iterator over all VarlistEntries referenced as alternates. Doesn't + traverse alternates of alternates, etc. 
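+
+            E.g. (hypothetical entries) for ``var.alternates == [[a], [b, c]]``
+            this yields ``a``, ``b``, ``c``, but not any alternates that *a*,
+            *b* or *c* declare themselves.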
+ """ + for alt_vs in var.alternates: + yield from alt_vs + + vlist_settings = util.coerce_to_dataclass( + parent.pod_data, VarlistSettings) + globals_d = vlist_settings.global_settings + dims_d = {k: _pod_dimension_from_struct(k, v, vlist_settings) + for k, v in parent.pod_dims.items()} + + vlist_vars = { + k: VarlistEntry.from_struct(globals_d, dims_d, name=k, parent=parent, **v) + for k, v in parent.pod_vars.items() + } + for v in vlist_vars.values(): + # validate & replace names of alt vars with references to VE objects + for altv_name in _iter_shallow_alternates(v): + if altv_name not in vlist_vars: + raise ValueError((f"Unknown variable name {altv_name} listed " + f"in alternates for varlist entry {v.name}.")) + linked_alts = [] + for alts in v.alternates: + linked_alts.append([vlist_vars[v_name] for v_name in alts]) + v.alternates = linked_alts + return cls(contents=list(vlist_vars.values())) diff --git a/src/varlistentry_util.py b/src/varlistentry_util.py deleted file mode 100644 index 8eba4856e..000000000 --- a/src/varlistentry_util.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Definitions for statuses and other varlistentry attributes -""" -from src import util - -# The definitions and classes in the varlistentry_util module are separate from the -# diagnostic module to prevent circular module imports -VarlistEntryRequirement = util.MDTFEnum( - 'VarlistEntryRequirement', - 'REQUIRED OPTIONAL ALTERNATE AUX_COORDINATE', - module=__name__ -) -VarlistEntryRequirement.__doc__ = """ -:class:`util.MDTFEnum` used to track whether the DataSource is required to -provide data for the :class:`VarlistEntry`. -""" - -VarlistEntryStage = util.MDTFIntEnum( - 'VarlistEntryStage', - 'NOTSET INITED QUERIED FETCHED PREPROCESSED', - module=__name__ -) -VarlistEntryStage.__doc__ = """ -:class:`util.MDTFIntEnum` used to track the stages of processing of a -:class:`VarlistEntry` carried out by the DataSource. -""" diff --git a/src/verify_links.py b/src/verify_links.py index b84418095..f1a01e63c 100755 --- a/src/verify_links.py +++ b/src/verify_links.py @@ -9,11 +9,12 @@ Based on test_website by Dani Coleman, bundy@ucar.edu. """ import sys + # do version check before importing other stuff if sys.version_info[0] != 3 or sys.version_info[1] < 10: sys.exit("ERROR: MDTF currently only supports python >= 3.10.*. Please check " - "which version is on your $PATH (e.g. with `which python`.)\n" - f"Attempted to run with following python version:\n{sys.version}") + "which version is on your $PATH (e.g. with `which python`.)\n" + f"Attempted to run with following python version:\n{sys.version}") # passed; continue with imports import os import argparse @@ -27,6 +28,7 @@ from src import util import logging + _log = logging.getLogger(__name__) Link = collections.namedtuple('Link', ['origin', 'target']) @@ -38,11 +40,13 @@ target (str): URL referred to by the link. """ + class LinkParser(HTMLParser): """Custom subclass of :py:class:`~html.parser.HTMLParser` which constructs an iterable over each ```` tag. Adapted from ``__. """ + def reset(self): super(LinkParser, self).reset() self.links = iter([]) @@ -71,6 +75,7 @@ def __init__(self, root, rel_path_root=None, verbose=False, log=None): verbose (bool, default False): Set to True to print each file examined. 
""" + def munge_input_url(url): url_parts = urllib.parse.urlsplit(url) if not url_parts.scheme: @@ -90,9 +95,9 @@ def munge_input_url(url): self.verbose = verbose self.pod_name = None - # NB: WK_DIR isn't a "working directory"; it's just the base path + # NB: WORK_DIR isn't a "working directory"; it's just the base path # relative to which paths are reported - (self.root_url, self.WK_DIR, self.root_file) = munge_input_url(root) + (self.root_url, self.WORK_DIR, self.root_file) = munge_input_url(root) if rel_path_root: self.rel_path_root, _, _ = munge_input_url(rel_path_root) else: @@ -149,9 +154,9 @@ def check_one_url(self, link): # print('\nFailed to find file or connect to server.') # print('Reason: ', e.reason) tup = re.split(r"\[Errno 2\] No such file or directory: \'(.*)\'", - str(e.reason)) + str(e.reason)) if len(tup) == 3: - str_ = util.abbreviate_path(tup[1], self.WK_DIR, '$WK_DIR') + str_ = util.abbreviate_path(tup[1], self.WORK_DIR, '$WORK_DIR') else: str_ = str(e.reason) self.log.error("Missing '%s'.", str_, tags=util.ObjectLogTag.BANNER) @@ -161,8 +166,8 @@ def check_one_url(self, link): else: parser = LinkParser() links = [ - Link(origin=url, target=urllib.parse.urljoin(url, link_out)) \ - for link_out in self.gen_links(f, parser) + Link(origin=url, target=urllib.parse.urljoin(url, link_out)) + for link_out in self.gen_links(f, parser) ] f.close() return links @@ -209,7 +214,7 @@ def breadth_first(self, root_url): # restrict links to those that start with root_parent new_links = [ lnk for lnk in new_links if lnk.target not in known_urls \ - and lnk.target.startswith(root_parent) + and lnk.target.startswith(root_parent) ] queue.extend(new_links) # update known_urls so that we don't chase cycles @@ -251,11 +256,11 @@ def verify_pod_links(self, pod_name): their path relative to the POD's output directory. 
""" self.pod_name = pod_name - self.WK_DIR = util.remove_suffix( - util.remove_suffix(self.WK_DIR, os.sep), pod_name + self.WORK_DIR = util.remove_suffix( + util.remove_suffix(self.WORK_DIR, os.sep), pod_name ) if not self.root_file: - self.root_file = pod_name+'.html' + self.root_file = pod_name + '.html' root_url = urllib.parse.urljoin(self.root_url, self.root_file) missing = self.breadth_first(root_url) missing_dict = self.group_relative_links(missing) @@ -277,15 +282,17 @@ def verify_all_links(self): missing = self.breadth_first(root_url) return self.group_relative_links(missing) + # -------------------------------------------------------------- + if __name__ == '__main__': # Wrap input/output if we're called as a standalone script parser = argparse.ArgumentParser() parser.add_argument("-v", "--verbose", action="store_true", - help="increase output verbosity") + help="increase output verbosity") parser.add_argument("path_or_url", - help="URL or filesystem path to the MDTF framework output directory.") + help="URL or filesystem path to the MDTF framework output directory.") args = parser.parse_args() # instead of print(), use root logger diff --git a/src/xr_parser.py b/src/xr_parser.py index cc982f4ca..2113c3edc 100644 --- a/src/xr_parser.py +++ b/src/xr_parser.py @@ -16,7 +16,7 @@ import xarray as xr import numpy as np -from src import util, units, core +from src import util, units import logging @@ -44,7 +44,7 @@ @util.mdtf_dataclass -class PlaceholderScalarCoordinate(): +class PlaceholderScalarCoordinate: """Dummy object used to describe `scalar coordinates `__ referred to by name only in the 'coordinates' attribute of a variable or @@ -412,7 +412,7 @@ class MDTFCFDataArrayAccessor( # ======================================================================== - +@util.mdtf_dataclass class DefaultDatasetParser: """Class containing MDTF-specific methods for cleaning and normalizing xarray metadata. @@ -420,41 +420,35 @@ class DefaultDatasetParser: Top-level methods are :meth:`parse` and :meth:`get_unmapped_names`. """ - def __init__(self, data_mgr, pod): + def __init__(self, config: util.NameSpace): """Constructor. Args: - data_mgr: DataSource instance calling the preprocessor. - pod (:class:`~src.diagnostic.Diagnostic`): POD whose variables are - being preprocessed. + config: runtime configuration """ - config = core.ConfigManager() self.disable = config.get('disable_preprocessor', False) self.overwrite_ds = config.get('overwrite_file_metadata', False) self.guess_names = False self.fallback_cal = 'proleptic_gregorian' # CF calendar used if no attribute found self.attrs_backup = dict() - self.log = pod.log # temporary - def setup(self, data_mgr, pod): - """Hook for use by child classes (currently unused) to do additional - configuration immediately before :meth:`parse` is called on each - variable for *pod*. + self.log = util.MDTFObjectLogger.get_logger(self._log_name) - Args: - data_mgr: DataSource instance calling the preprocessor. - pod (:class:`~src.diagnostic.Diagnostic`): POD whose variables are - being preprocessed. - """ - pass + @property + def _log_name(self): + return f"xr_parser_default_data_parser" # --- Methods for initial munging, prior to xarray.decode_cf ------------- - def guess_attr(self, attr_desc, attr_name, options, default=None, - comparison_func=None): + def guess_attr(self, + attr_desc: str, + attr_name: str, + options, + default: str = None, + comparison_func: str = None): """Select and return element of *options* equal to *attr_name*. 
- If none are equal, try a case-insensititve string match. + If none are equal, try a case-insensitive string match. Args: attr_desc (str): Description of the attribute (only used for log @@ -552,7 +546,7 @@ def normalize_attr(self, new_attr_d, d, key_name, key_startswith=None): # key was found with expected name; copy to new_attr_d new_attr_d[key_name] = d[key_name] - def normalize_calendar(self, attr_d): + def normalize_calendar(self, attr_d: dict): """Finds the calendar attribute, if present, and normalizes it to one of the values in the CF standard before `xarray.decode_cf() `__ @@ -621,7 +615,7 @@ def _restore_one(name, attrs_d): # log warning but still update attrs self.log.warning("%s: discrepancy for attr '%s': '%s' != '%s'.", name, k, vv, attrs_d[k]) - elif hasattr(v, '__iter__') and not isinstance(v, str) and v.any() not in attrs_d[k] \ + elif hasattr(v, '__iter__') and not isinstance(v, str) and v not in attrs_d[k] \ or v != attrs_d[k]: self.log.warning("%s: discrepancy for attr '%s': '%s' != '%s'.", name, k, v, attrs_d[k]) @@ -1255,7 +1249,7 @@ def check_ds_attrs(self, var, ds): # --- Top-level methods ----------------------------------------------- - def parse(self, var, ds): + def parse(self, var, ds: xr.Dataset): """Calls the above metadata parsing functions in the intended order; intended to be called immediately after the Dataset *ds* is opened. @@ -1285,19 +1279,18 @@ def parse(self, var, ds): Except in specific cases, attributes of *var* are updated to reflect the 'ground truth' of data in *ds*. """ - if var is not None: - self.log = var.log + self.normalize_pre_decode(ds) ds = xr.decode_cf(ds, decode_coords=True, # parse coords attr decode_times=True, use_cftime=True # use cftime instead of np.datetime64 ) - ds = ds.cf.guess_coord_axis() + # ds = ds.cf.guess_coord_axis() # may not need this self.restore_attrs_backup(ds) - self.normalize_metadata(var, ds) + #self.normalize_metadata(var, ds) self.check_calendar(ds) - self._post_normalize_hook(var, ds) + #self._post_normalize_hook(var, ds) if self.disable: return ds # stop here; don't attempt to reconcile @@ -1337,27 +1330,3 @@ def get_unmapped_names(ds): if (ref not in all_arr_names) and (ref not in all_attr_names): missing_refs[ref] = lookup[ref] return missing_refs - - -class MultirunDefaultDatasetParser(DefaultDatasetParser): - """Class containing MDTF-specific methods for cleaning and normalizing - xarray metadata. Methods reference data_mgr only. The data_mgr references the pod - object that contains the cases, rather than a case object with all of the pods. - - Top-level methods are :meth:`parse` and :meth:`get_unmapped_names`. - """ - - def __init__(self, data_mgr): - """Constructor. 
- - Args: - data_mgr: DataSource instance calling the preprocessor: src.diagnostic.MultirunDiagnostic - """ - config = core.ConfigManager() - self.disable = config.get('disable_preprocessor', False) - self.overwrite_ds = config.get('overwrite_file_metadata', False) - self.guess_names = False - - self.fallback_cal = 'proleptic_gregorian' # CF calendar used if no attribute found - self.attrs_backup = dict() - self.log = data_mgr.log # temporary diff --git a/templates/runtime_config.jsonc b/templates/runtime_config.jsonc new file mode 100755 index 000000000..a27d79d10 --- /dev/null +++ b/templates/runtime_config.jsonc @@ -0,0 +1,120 @@ +// This is a template for configuring MDTF to run PODs that analyze multi-run/ensemble data +// +// Copy this file, rename it, and customize the settings as needed +// Pass your file to the framework using the -f/--input-file flag. +// Any other explicit command line options will override what's listed here. +// +// All text to the right of an unquoted "//" is a comment and ignored, as well +// as blank lines (JSONC quasi-standard.) +// +// Remove your test config file, or any changes you make to this template if you do not rename it, +// from your remote repository before you submit a PR for review. +// To generate CMIP synthetic data in the example dataset, run the following: +// > mamba env create --force -q -f ./src/conda/_env_synthetic_data.yml +// > conda activate _MDTF_synthetic_data +// > pip install mdtf-test-data +// > cd /mdtf +// > mkdir mdtf_test_data && cd mdtf_test_data +// > mdtf_synthetic.py -c CMIP --startyear 1980 --nyears 5 +// > mdtf_synthetic.py -c CMIP --startyear 1985 --nyears 5 +// Note that MODEL_DATA_ROOT assumes that mdtf_test_data is one directory above MDTF-diagnostics +// in this sample config file { + // Run each ensemble on the example POD. + // Add other PODs that work on ensemble datasets to the pod_list as needed + "pod_list" : [ + //"example" + "example_multicase" + ], + // Each case corresponds to a different simulation/output dataset + // startdate, enddate: either YYYYMMDD, YYYYMMDDHHMMSS, YYYY-MM-DD, or YYYY-MM-DD:HHMMSS + "case_list": + { + "CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231": + { + "model": "test", + "convention": "CMIP", + "startdate": "19800101120000", + "enddate": "19841231000000" + } + , + "CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231": + { + "model": "test", + "convention": "CMIP", + "startdate": "19850101", + "enddate": "19891231" + } + }, + // PATHS --------------------------------------------------------------------- + // Location of supporting data downloaded when the framework was installed. + // If a relative path is given, it's resolved relative to the MDTF-diagnostics + // code directory. Environment variables (eg, $HOME) can be referenced with a + // "$" and will be expanded to their current values when the framework runs. + // Full path to model data ESM-intake catalog header file + //"DATA_CATALOG": "/net/jml/mdtf/MDTF-diagnostics/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json", + "DATA_CATALOG": "/Users/jess/mdtf/MDTF-diagnostics/diagnostics/example_multicase/esm_catalog_CMIP_synthetic_r1i1p1f1_gr1.json", + // Backwards compatibility + "MODEL_DATA_ROOT": "../inputdata/mdtf_test_data", + // Parent directory containing observational data used by individual PODs. + "OBS_DATA_ROOT": "../inputdata/obs_data", + + // Working directory. + "WORK_DIR": "../wkdir", + + // Directory to write output. 
The results of each run of the framework will be + // put in a subdirectory of this directory. Defaults to WORK_DIR if blank. + "OUTPUT_DIR": "../wkdir", + + // Location of the Anaconda/miniconda or micromamba installation to use for managing + // dependencies (path returned by running `conda info --base` or `micromamba info`.) + //"conda_root": "/net/jml/miniconda3", + "conda_root": "/Users/jess/micromamba", + + // Directory containing the framework-specific conda environments. This should + // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left + // blank, the framework will look for its environments in conda_root/envs + //"conda_env_root": "/net/jml/miniconda3/envs", + "conda_env_root": "/Users/jess/micromamba/envs", + + // Location of the micromamba executable. Required if using micromamba + "micromamba_exe": "/Users/jess/.local/bin/micromamba", + + // SETTINGS ------------------------------------------------------------------ + // Any command-line option recognized by the mdtf script (type `mdtf --help`) + // can be set here, in the form "flag name": "desired setting". + + // Settings affecting what output is generated: + // Set to true to run the preprocessor; default true: + "run_pp": true, + // Set to true to perform data translation; default false: + "translate_data": true, + // Set to true to have PODs save postscript figures in addition to bitmaps. + "save_ps": false, + + // Set to true for files > 4 GB + "large_file": false, + + // If true, leave pp data in OUTPUT_DIR after preprocessing; if false, delete pp data after PODs + // run to completion + "save_pp_data": true, + + // Set to true to save HTML and bitmap plots in a .tar file. + "make_variab_tar": false, + + // Generate html output for multiple figures per case + "make_multicase_figure_html": false, + + // Set to true to overwrite results in OUTPUT_DIR; otherwise results saved + // under a unique name. + "overwrite": false, + + // List with custom preprocessing script(s) to run on data + // Place these scripts in the sites/local directory of your copy of the MDTF-diagnostics repository + "user_pp_scripts" : ["example_pp_script.py"], + + // Settings used in debugging: + + // Log verbosity level. + "verbose": 1 } diff --git a/templates/runtime_config.yml b/templates/runtime_config.yml new file mode 100755 index 000000000..6c561976c --- /dev/null +++ b/templates/runtime_config.yml @@ -0,0 +1,73 @@ +# Runtime yaml configuration file template for the MDTF-diagnostics package +# Create a copy of this file for personal use, and pass it to the framework +# with the -f | --configfile flag + +# List of POD(s) to run +pod_list: + - "example_multicase" + - "MJO_suite" + +# Case list entries (must be unique IDs for each simulation) +case_list: + "CMIP_Synthetic_r1i1p1f1_gr1_19800101-19841231" : + model: "test" + convention: "CMIP" + startdate: "19800101:000000" + enddate: "19841231:000000" + + "CMIP_Synthetic_r1i1p1f1_gr1_19850101-19891231" : + model: "test" + convention: "CMIP" + startdate: "19850101:000000" + enddate: "19891231:000000" + +### Data location settings ### +# Required: full path to ESM-intake catalog header file +#DATA_CATALOG: "/local2/home/mdtf/MDTF-diagnostics/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json" +DATA_CATALOG: "/local2/home/mdtf/MDTF-diagnostics/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json"
diff --git a/tests/checksums/checksum_model_data.yml b/tests/checksums/checksum_model_data.yml
deleted file mode 100644
index f6aee3ce4..000000000
--- a/tests/checksums/checksum_model_data.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# This file was produced by make_file_checksums.py and is used by the
-# test_*_checksums.py unit tests. Don't modify it by hand!
-# -GFDL.CM4.c96L32.am4g10r8: - day/GFDL.CM4.c96L32.am4g10r8.WVP.day.nc: bfb77301e68e490a74caf6c92c37b0a0 - day/GFDL.CM4.c96L32.am4g10r8.precip.day.nc: 1791f4821159e4373fcf074285514a69 - day/GFDL.CM4.c96L32.am4g10r8.sphum.day.nc: 69a0e50ed517d777f9bca416a29b43a0 -Lmon_GISS-E2-H_historical_r1i1p1: - mon/Lmon_GISS-E2-H_historical_r1i1p1.evspsbl.mon.nc: b059be13ac76116c69a901021173f8e7 - mon/Lmon_GISS-E2-H_historical_r1i1p1.mrsos.mon.nc: 70677bce4d82e6bda044416e541a4b55 - mon/Lmon_GISS-E2-H_historical_r1i1p1.pr.mon.nc: c0995b3bc924532fce456fb994f6f3d1 - mon/pr_Lmon_GISS-E2-H_historical_r1i1p1_195001-200512.nc: dc13946a78cc1369394245165b6510fb -QBOi.EXP1.AMIP.001: - 1hr/QBOi.EXP1.AMIP.001.PRECT.1hr.nc: dc0c05eeb7c535e6fee750c4bdc82a35 - 1hr/QBOi.EXP1.AMIP.001.prw.1hr.nc: 8f146feabfd93ec7ae54a0d33a9ae53e - 1hr/QBOi.EXP1.AMIP.001.qsat_int.1hr.nc: 6489cd4ea8f74a10b66a556e0e2435a8 - 1hr/QBOi.EXP1.AMIP.001.tave.1hr.nc: 951094089af1590d3cc00c0dd5b71bdb - 3hr/QBOi.EXP1.AMIP.001.PRECT.3hr.nc: 0d879e7d20d71ead81e454005013be36 - day/QBOi.EXP1.AMIP.001.FLUT.day.nc: 1a41ab921ca539c2624dc6d6e6e07921 - day/QBOi.EXP1.AMIP.001.OMEGA500.day.nc: ded7f77156ebc34be4e30b00c81c9dfe - day/QBOi.EXP1.AMIP.001.PRECT.day.nc: b5eef5ca2610063e6e2283957540833a - day/QBOi.EXP1.AMIP.001.T250.day.nc: 26cd9d78022de1a58028bbc16cd6e5bb - day/QBOi.EXP1.AMIP.001.U200.day.nc: 798752b1b200a4bbd636c088d6eb4d15 - day/QBOi.EXP1.AMIP.001.U250.day.nc: cca183e077dac1638fe472f4f124084e - day/QBOi.EXP1.AMIP.001.U850.day.nc: a5ecd9c563d99c544d0da6bf890b9f85 - day/QBOi.EXP1.AMIP.001.V200.day.nc: 5a7eb3033145b8fc701b475c1e3bbe75 - day/QBOi.EXP1.AMIP.001.V850.day.nc: 4ccc39a9339eac4eeb75ce918d7c3840 - day/QBOi.EXP1.AMIP.001.Z250.day.nc: 304221d611e4548bdd6ebfeda741ad5c - mjo_wvn_freq_QBOi.EXP1.AMIP.001.winter.eps: aad8339bbc23d5b1056c7f6baf197910b70f093cea72cc24eb7c172b5dda5378 - mon/QBOi.EXP1.AMIP.001.PS.mon.nc: 34b2aedfee046619d6617af05cfc7695 - mon/QBOi.EXP1.AMIP.001.Z3.mon.nc: 91fbaba23e4e3e55ad8b5b3d4d285ec5 diff --git a/tests/checksums/checksum_obs_data.yml b/tests/checksums/checksum_obs_data.yml deleted file mode 100644 index 6cd7b04a3..000000000 --- a/tests/checksums/checksum_obs_data.yml +++ /dev/null @@ -1,202 +0,0 @@ - -# This file was produced by make_file_checksums.py and is used by the -# test_*_checksums.py unit tests. Don't modify it by hand! 
-# -EOF_500hPa: - NCEP.natl.eof1.gif: dda76e56c9025c8786d498c23263a37968b9e8ef5dabbf88eb70a9f57bcd5d17 - NCEP.npac.eof1.gif: 8724e271faa1fba6e1f27c4308d7b70a34ae2ac20ef5815f42d167987cfe62bf -MJO_prop_amp: - fig1a_trim.png: 58e087b3f3bd6116d590de3532fec2bdd932bf4c64aec2bad4fada185e56e26a - fig1c_trim.png: 71559e16d0f0ddb6bfe7ebd684bba670efb5a13babaff0fc7ba6ac50a8f7f5db - fig2a_trim.png: 36944761bf295707f73ed054f33c01a2a6c9c175133624934d9a32aa93b19dc5 - obs_hovmoller.nc: 76e3f28455209bcfb567e34b24a8d8ba - obs_hus_900_650_mean.nc: 6df2bef38afacd217193c21858e18a72 -MJO_suite: - ERA.MJO.lag.lat.lon.annual.png: ea02eed6a1ae8a768ccb40ac5cd930294518820e8ac974f320a1a13af281a674 - ERA.MJO.lag.lat.lon.summer.png: 3622d8853fefe9660f22aed1a2e7174b65a423e2cd2713bba3b7b377ac02ea6c - ERA.MJO.lag.lat.lon.winter.png: 5dce3d5b37d52ad160af63659f57cf6b33e5690468dfe033213218b4ef772fa2 - ERA.MJO.life.cycle-0.png: 67fb0fc13ad1bdc5f545ed56dbc6844430764219dea5b5c98cd679b44ff2a1d8 - ERA.MJO.life.cycle-1.png: 3fc7aff7ec86e19a99a24909cdf95352710bd988f32e47d0c4233eb3b2fdb49d - ERA.rlut.EOF.summer-0.png: 178e48aa2a786d8777aacff0c6f96068506a7d617065e19c1dcaf856b2eafdfe - ERA.rlut.EOF.summer-1.png: 93de27daa1fca2ca9cf95177a06765a86620be37be7d4db6d724a1a74a4c9563 - ERA.rlut.EOF.winter-0.png: b9a7d90dc4b56a561e668090e6246c5b0aa59a96f1629168f88fa16115d09101 - ERA.rlut.EOF.winter-1.png: 68085db956fda105cf16c23df1a6384d2fd6110b373920577c33c3736ca94fd7 - ERA.rlut.MJO.wave.freq.summer.png: 07f397ac48a6bb818348d4609a87f14e0b8e7510d9cef52888942b415203a33c - ERA.rlut.MJO.wave.freq.winter.png: fe6cd04d8db20cd64a4a00a29bd62ec74cba15a4e0aa48f2f0506357ab50ceed - ERA.u200.EOF.summer-0.png: 482f55cc1de7284feb9a18a89c0ea9eb81cb1d4d493c937e973aba7d1b657b75 - ERA.u200.EOF.summer-1.png: 79233844b5d9c68198daf1af17ef07f79e93fd2a0bba7233a5a29025920a5c9f - ERA.u200.EOF.winter-0.png: 3ca569dae0ad2d51c319b684f07d322f94431fff29eeec01fa6cf10362b837b5 - ERA.u200.EOF.winter-1.png: 150b029ff62a8bff3de04c3779cf1dc805925e335d4fcd5c191a3bec459e08cb - ERA.u200.MJO.wave.freq.summer.png: 304f5327db9ff66dd6d35968021d4fe7e068e6f1c1c0f49ae82e0bf1c6c8383e - ERA.u200.MJO.wave.freq.winter.png: cc3772bd5a818b66bd47e273bda5732d12436f2420c8acb5dbc4d88ab468237e - ERA.u850.EOF.summer-0.png: d2c1c25018df5f0c24b8f57c17a655685b31d3648c2993c5cbca0a5f453a6660 - ERA.u850.EOF.summer-1.png: a384fe9370304410976632f3f0ce2cc9221f60903430ce182ec0d67d67ff6171 - ERA.u850.EOF.winter-0.png: 5dada9c5058aa18ff95d6421a7cbb3b891f52daa8007c3949a42a8141fac14fc - ERA.u850.EOF.winter-1.png: c4f1da45654271e0c5242a46f1261c9954c7ff322b1a56f8dd53f62fb47ccce0 - ERA.u850.MJO.wave.freq.summer.png: 9e46d3e54809f911a6d49306efa929cc2d73d6463e23bd4ffe3d862c647c6bd8 - ERA.u850.MJO.wave.freq.winter.png: d768fcdb525c0066993f21cc896c1c2ef9ea880bc38635e2bc18d19a6e36575f - ERA.v200.EOF.summer-0.png: 7de16a33d8a216c6b222fb3fa9b7a6192060ce5507cd09030faf86595ef48f17 - ERA.v200.EOF.summer-1.png: 7159eb4facf6d60511ef52b9f42f7d84ecf9135fab9e00e325d2454973076a96 - ERA.v200.EOF.winter-0.png: 638da40564b33f8a21dc339e9b1a8a21903f7db11ec643400ecc7756944b6a1e - ERA.v200.EOF.winter-1.png: 2289b83975dd2a53a71625c2b981ab78d9809c6a48ae6d0fe7d2e4fa5278a777 - ERA.v200.MJO.wave.freq.summer.png: f00d915922ba63862d8298023ff64cb2bcb446e19ca7b831f035f8594935b246 - ERA.v200.MJO.wave.freq.winter.png: e11a40e5b7cc92072f3dfb02a63847cf6951179d41911db0eb3b753cfea0151e - ERA.v850.EOF.summer-0.png: cef5a129d83095b204cd9586180f4dfa03deda00a0dc5733e334145c93a7d15c - ERA.v850.EOF.summer-1.png: 
7cee81de8ef13e2a098a6da79e8eefb4ef7f62f54dc388309c73e46f604716da - ERA.v850.EOF.winter-0.png: d29cf03d9280c4fc40821022c3f5b21fc187eb542e7382096c1379cbbb1f09cc - ERA.v850.EOF.winter-1.png: 8668981f42e40964dc50cd2031ebabe5c29ce6ae221b07c2a8af86171286991d - ERA.v850.MJO.wave.freq.summer.png: a127655ccbcefdac91cf8f334a5a369b77a75a58da26f9e4b77a3d4e5895ef5d - ERA.v850.MJO.wave.freq.winter.png: 7485208c0420f3eb04c279de40ff152b72304ba5e4d56de1211a46e8e6c0b90d - NCEP.MJO.gif: 612a2f25a09dfb0f0a6bfa584faa3d2a36dedf8cfa2a18155f23bf1feeed2d98 - TRMM.pr.EOF.summer-0.png: b0f0a81ca3c8da773b53cb5574e3d6e74c99a7d52156ca41d9e5eec3e608d7ac - TRMM.pr.EOF.summer-1.png: d18f9e35c415201f46e3cacf0fc3e0f3a035d8a47a75b97e2653a1ef31e26a0e - TRMM.pr.EOF.winter-0.png: d0b937c97e3dda4aa34f39d02ab3c7f2c1bfff530d3dc7e77a2a0e8e2780993a - TRMM.pr.EOF.winter-1.png: a1a7be82a9d3c719e35f363fbd0dae5b80c9f45e3ecc1a2610019d491e4735a4 - TRMM.pr.MJO.wave.freq.summer.png: b4a09f8d10dcc85a7924581d522c1c9c5a86676366fe172b6bfb52e98ab968d0 - TRMM.pr.MJO.wave.freq.winter.png: a9bc67a7710eb08b5c6a058d9814ed3deb73f6026a559ba75399488480b20b1c -MJO_teleconnection: - ERAi.1979_2016.DJF.mean.nc: 35d87f74eca1cb982d2ba807b7f37cbd - EWratio_wf_CMIP5: f7c5326d4fb692aaf35d806a99568cb0 - GPCP_composites.nc: 7e5a140a36fe5a5afd4365780d593cb5 - PR_composites_CNRM_CM5.nc: 96bbf48d4bf4c56f8e999ba2ff35ccb7 - PR_composites_GFDL_CM3.nc: 6c974722db36f4d6803bfbb5d7de5e45 - PR_composites_MIROC5.nc: 53913ce43344b58b6670ee8dedf340a7 - PR_composites_MRI_CGCM3.nc: 08e5fb7cba0a832dc9bdc01718fc9d6b - PR_composites_NorESM1_M.nc: be07583e9eafb9b45cdaf516b0d86757 - PR_composites_bcc_csm1_1.nc: c70e7b9a986468cce42295348f081779 - U250_RMS: 5068bff25a91cc5e7561216d70a38d2d - U250_RMS_jetext: 81429de35d42987076e4994619322ac6 - WH04_EOFstruc.txt: fc66aa611b03dd26b18701fbdbf853e2 - ccr_Z250comp_CMIP5: 4a3dba49fac3115b74e8d7b41fcd0857 - geop_compositesP_hgt250.nc: e4448d3a48a408f32e1ab9bee97fab3e - geop_compositesP_hgt250_CNRM_CM5.nc: 207c5954b6d8817157b537240095428b - geop_compositesP_hgt250_GFDL_CM3.nc: 0232f07cf6d2e11f463c3be510697bda - geop_compositesP_hgt250_MIROC5.nc: 1276e5087f79f2b0744c9fa922d3cefe - geop_compositesP_hgt250_MRI_CGCM3.nc: 2127a3bbcda0e854f62807c3425e965a - geop_compositesP_hgt250_NorESM1_M.nc: 86b60e97bb29d737c73529d1743e750f - geop_compositesP_hgt250_bcc_csm1_1.nc: 54a4fa30e2a306dd0ef57a68bb1a564d - t_table_data.txt: caea0eceaa8754ca40732956dfccdbca - tstatP_hgt250.nc: 9501ce93e5802f55b801ad232be86930 - tstatP_hgt250_CNRM_CM5.nc: d65719a4b951f96fef87c9a32572819e - tstatP_hgt250_GFDL_CM3.nc: 66d5071c3104a992cd1105862077d12c - tstatP_hgt250_MIROC5.nc: 913a7b62433bee1aa8428b9c72853a78 - tstatP_hgt250_MRI_CGCM3.nc: 03261ccbe18d29d6fb9785776d231321 - tstatP_hgt250_NorESM1_M.nc: 2e2327387706b2121bfcf79c39485fb6 - tstatP_hgt250_bcc_csm1_1.nc: cbf5eeee15cc6aa7cc859b49e514b64f -SM_ET_coupling: - corr_mrsos_evspsbl_summer_2x2_allmodels.RData: cfbf24f514f6e59c5d5951d5568385a4 - corr_smsurf_E_GLEAM_1980_2014.RData: 8e5287bd6a65cc824ffb40341f8af70a - lat.RData: 5740269615b7ecb0954d68dcd60cd7e8 - lat_GLEAM.RData: 7550ae7b39aaa1321fd7e2e2b54ad602 - lat_GLEAM_P.RData: 0226783c4dfc0b31c5e9d51aa1253492 - list_models_mrsos_PCMDI.RData: 39fb0bfe98b9a2af52dc22663a94ce91 - list_models_pr_long.RData: 553955f86e6a91d4605163cf8646cc2b - lon.RData: 644c5a02a9bde314c19ac4aa3df0842c - lon_GLEAM.RData: c82d801ce35dcf8add428f430324423d - lon_GLEAM_P.RData: c6451ebf37d5a223e80f96f63c763658 - mask_2x2_NAs.RData: 097492a65366d7fd4c93f35a2a73508f - 
mean_evap_summer_GLEAM_1980_2014.RData: da70f16307e7c641a598d20b0638a7fe - mean_pr_summer_2x2_allmodels.RData: ace3ad4908dbeac1493c1a729bf3b959 - mean_precip_summer_GLEAM_1980_2014.RData: fc722e842a0b91eee812675f2d176b21 - mean_smsurf_summer_GLEAM_1980_2014.RData: bacb3d5685e2ab8194ac49faa5b3fc54 -Wheeler_Kiladis: - Fig1.Asym.TRMM_pr.png: d846c39703e1ce307b72c8665a40ea3f57462f02ef3c665e688ef0344ce9473f - Fig1.Asym.omega500.NCEP.gif: 005c4c450926750e5bc828cf900291eee7c5fe8f007a2027e0908522a6777c42 - Fig1.Asym.pr.NCEP.gif: 0cda044f1467b888a254f876385569e4afdb6f9cd0f0f0260a48bdb15e56be30 - Fig1.Asym.rlut.NCEP.gif: 9ef8a48bcbd7c572031af008991865208e8237bf5c2510d4c171513181c82783 - Fig1.Asym.rlut.NOAA.gif: faa0ab3321864019893e939b14375c0c40f03b6952dbac8538bf5fdec75a0a94 - Fig1.Asym.u200.NCEP.gif: 852635cbe278b5fc73a90e8518f6d8050b997d656bd8578464cf0e9a7b98fd33 - Fig1.Asym.u850.NCEP.gif: 6de677ded72a40037b82956e0047d3ef753e450e9c6442f13a1de4053f0b6fc3 - Fig1.Sym.TRMM_pr.png: ba4e03c25d7ef73abaeed07d5c897496dbbd36eadb53e441a51075cc0b6359de - Fig1.Sym.omega500.NCEP.gif: 70486b40b13f6aa3dd619283850472d6ae17457ff22aec02a12a83a0f02331ff - Fig1.Sym.pr.NCEP.gif: dce0e720fb678fc8d0aebf6676330c33f94d6c473b5b9ddc320c8fb3cb55fb47 - Fig1.Sym.rlut.NCEP.gif: ec421bb315c00d67633f7b74ce186a6598e7518fa1c6912b8e3d561fb3f108ab - Fig1.Sym.rlut.NOAA.gif: 274e699b124cf0287c96c3a337eb0ddea54f52aefe40df90d534b1f87d115a4e - Fig1.Sym.u200.NCEP.gif: 2b873178ef0fe5351f9f8fc9526d5dca4b2c23f963b877a333288c8cf8bc3570 - Fig1.Sym.u850.NCEP.gif: d0b09837501e77c348c1a3e4a65c1c045378353da805b8f7a8839fc13bf72da4 - Fig2.TRMM_pr.png: ecc54010f244118ca758e4d7eec519e1a559bf8e34a78086f9a5bc8f0c953116 - Fig2.omega500.NCEP.gif: 23482d48aa8255f9435731c49a0110237f6f810c22ce23a5cacbed62fe984ba7 - Fig2.pr.NCEP.gif: f8a64fc27d6337537a7226258b9f95a0962f8886e723e550878f0832bcfabe1b - Fig2.rlut.NCEP.gif: b4b70c4b608641e408627d6f8f8329b4902ce48ecdb899f472a6b8ce6ba9dc3a - Fig2.rlut.NOAA.gif: c7eadc3dbc7f613ac22fe7a127329a9d2dd2d85d834c82e229870adef7df0d66 - Fig2.u200.NCEP.gif: 952c0e23288e87e5a8c7783858b757aeccef5894f0341da90db1673a7a1b9760 - Fig2.u850.NCEP.gif: ba88920fcf6bcf9c2218d6217e056836b4b0a3dca1e84a396ef013a32b5e3578 - Fig3.Asym.TRMM_pr.png: 6bf1bc66e69f7c9a3bac397eabe07feb1f7fcadf431142fb6986a6e45a245ea7 - Fig3.Asym.omega500.NCEP.gif: 077cea6e095b5c515ce520949488448cd689be376d894844233244b54bfe14a7 - Fig3.Asym.pr.NCEP.gif: fd5f340253ea03aa4feae12f164336ddbf130673790e3ba1ac618f35e899b7a1 - Fig3.Asym.rlut.NCEP.gif: fea21a7fcc056d85320164539633549b73a41c44210b0e44f4a014deba872716 - Fig3.Asym.rlut.NOAA.gif: 859b57f77a0dfcdcf30e042ca0c0756c0dbb88aac567bc58259010fdcba65a1a - Fig3.Asym.u200.NCEP.gif: 9c4249f3159b572ae926c029cc052ea0b39b852fb430391a3c0b1151836e6c97 - Fig3.Asym.u850.NCEP.gif: 5950f27375f8db391767f79e42499f997e68a8430bcbe656b55a6d8e7b435713 - Fig3.Sym.TRMM_pr.png: 518f31b248b54c760b8e5536710f14f9333a94c398d63ed65f9e66d2e3409e1a - Fig3.Sym.omega500.NCEP.gif: 51cf9b428410803ab51926d990d29ba7af53cbcfe25c9578ec084ea256b150b2 - Fig3.Sym.pr.NCEP.gif: 30fd949403a47233276506647f9c6c74a3fb704869f28e408fc65218e3719f9f - Fig3.Sym.rlut.NCEP.gif: 805d1bf44162b2d8d6904a1007fb2e14dc80c561e2020e3c3502078cded0408d - Fig3.Sym.rlut.NOAA.gif: b0b0681e7f2660a5fbaad9f4190ba878f11a83382c17a07a04e687bb12265ca6 - Fig3.Sym.u200.NCEP.gif: d731e4d921249dfe378bb6a1beedf187daffbfd44525ab7811c4fcd0efc65de6 - Fig3.Sym.u850.NCEP.gif: 34623eed42e3243e5c17a6425e6f2a8791f9e3b8def37419ced7a17d09d44149 -convective_transition_diag: - 
convecTransBasic_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_qsat_int.nc: 1dfc89f2fd69b22720a03a6f5ddc8de6 - convecTransBasic_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_tave.nc: a8394c21208d71bd6a2ebb53f7de7a92 - convecTransBasic_R2TMIv7r1_200206_201405_res=0.50_fillNrCWV_qsat_int.nc: bfb47e9c3aa9d70a1563bb145df99db9 - convecTransBasic_R2TMIv7r1_200206_201405_res=0.50_fillNrCWV_tave.nc: efd3211dee25d88aeab0ccbd21533c09 - convecTransBasic_R2TMIv7r1_200206_201405_res=1.00_fillNrCWV_qsat_int.nc: af4008e9ebee25dea6045c581f4b5028 - convecTransBasic_R2TMIv7r1_200206_201405_res=1.00_fillNrCWV_tave.nc: 691bacc2e19942f4b1d7ed344425fe84 - region_0.25x0.25_costal2.5degExcluded.mat: dc126a6ece17c7cbc15fc404a6f7a7b9 -precip_diurnal_cycle: - TRMM_DJF.nc: b401bf408342d594498c2fb6cd379162 - TRMM_JJA.nc: 4cc8963c82fd827f69c7d089b5c4f1d6 - TRMM_pr.50N50S.dc.AMJ.png: 84ea51f54dc03ca4ee7580bca4cd368305ea1ec9a2654424ea0a0eaebf365979 - TRMM_pr.50N50S.dc.ANN.png: 669afa6931207326fc04817150f45c198715d29439e3cbbf4318e0a79b8c08a1 - TRMM_pr.50N50S.dc.APR.png: 317989837dc5011222133530c6b42522d969aaeb4d73c2b7944052ebd74253ba - TRMM_pr.50N50S.dc.ASO.png: 4aba05d02f8a5e681c5216a5962027ca9bfb39381866e47356e21607aa477f1b - TRMM_pr.50N50S.dc.AUG.png: 865968f8a91b7ea15c91982a225ac99a16d6bbf2528769c8efd65e346fa122bf - TRMM_pr.50N50S.dc.DEC.png: 720e1346f9066edc20b8f8519df5cb7cfe5822e34ce9001c7ff57c827c0a466e - TRMM_pr.50N50S.dc.DJF.png: 46f5c0b218af0a3c537ec423851a7e6d2931cd3302faf86879ceb651d12c1378 - TRMM_pr.50N50S.dc.FEB.png: 5d32c5b9c510dffeca32bcabe41c3133e9bdf3d2194d06ac41d0f9a606425e5e - TRMM_pr.50N50S.dc.FMA.png: 2a23bed7e8c2abfd015753323d2713641c8c6581ca29cb50d7b13ee7105a6bdd - TRMM_pr.50N50S.dc.JAN.png: d8692bbaa0b551a4ace1b1e4acfe0de39a984a38de9ef701d969f689ddbe965d - TRMM_pr.50N50S.dc.JAS.png: 1b01f9ccf6d608b1b6664d01d99bb79a0900893ba652c49a74d584e9420c7c3d - TRMM_pr.50N50S.dc.JFM.png: 1ed1c5ae2405a2fc2c6bd4f09c481741134755c464e32e6538802ca803dd4ed8 - TRMM_pr.50N50S.dc.JJA.png: d8eff81c2f8a152317d2a53df878a62c3e031d30bc5a5d7ef3dbccc6ba1a7af9 - TRMM_pr.50N50S.dc.JUL.png: ec621d6287352e0557b5efdb010b86e0ce98b8e03c59af4b5eaef487f0d5596a - TRMM_pr.50N50S.dc.JUN.png: 8e00fa3c7d14beb7ad6ee22d495947505307f520e0522b13fc79aa0e0e9fe586 - TRMM_pr.50N50S.dc.MAM.png: 7f92a9b1d2818cf61c8182e81bcedfb6c3c447c2193ecd5b463d039643f28cd7 - TRMM_pr.50N50S.dc.MAR.png: 14cf4f9ea3b791a050ab610cb5ee5e5c2aeba3dac9464991031a09bb24cb5df3 - TRMM_pr.50N50S.dc.MAY.png: a6b69d156e38c2e183328373e2263d906649b63f2ad0d10824f27c8d09e9650e - TRMM_pr.50N50S.dc.MJJ.png: 4c1c21e7680cc4daa834503e7d5600e697ae551c0f5d1497c3212cd1c8394389 - TRMM_pr.50N50S.dc.NDJ.png: 4718da4fd1b3dcaa2a11e6a2b22393c94ac3493f54da5135fcae8666a0d299a1 - TRMM_pr.50N50S.dc.NOV.png: 2774b8f477edd4ffcd85583f38c2adefcaf418c179a51745932db356f0959a08 - TRMM_pr.50N50S.dc.OCT.png: 52eb47695108f6458d9c886668590b734ffaea6b71d68bf3eb5dff7cec39c9ec - TRMM_pr.50N50S.dc.OND.png: 68a5d03c9b1c489498ffd062a9a6449a0a02b5320e76f175dea137ede6efced9 - TRMM_pr.50N50S.dc.SEP.png: b6629f261ba0700376f4cb7076427387b382ec64e6849fdf463402128fdc8a31 - TRMM_pr.50N50S.dc.SON.png: 557c9cb08ffb153e6ec607e6c01d128e74ec1ecbbdc4d1241b9c191e597c7ce6 - TRMM_pr.50N50S.dc.SUM.png: a87bcaa0d95c7f08553ed99eed70dd5069cd9a46d239ab9111445125f6c51962 - TRMM_pr.50N50S.dc.WIN.png: 71ccd815e0d45e9b84f7d02e1a65b7cba40aa7a68ad680926257049220cb7ff1 - TRMM_pr.USA.dc.AMJ.png: a09f62b3c4071e5d016bc5569b41cf6346d9c3e3190465376717e89dffba0541 - TRMM_pr.USA.dc.ANN.png: a7b67421bce68773cd890484892dd9db5adcae9b89bf30d117797cf3cf0f0c7d - 
TRMM_pr.USA.dc.APR.png: 09517c81b757e858412bc97695640c62007a4282bafbdca112c6bf3d1dd09500 - TRMM_pr.USA.dc.ASO.png: 97c457ea64c56d1219e008a1804737cea351bec48562fe0e96dd25abc0872ee0 - TRMM_pr.USA.dc.AUG.png: 86524b83a7cde9e3f8af7ad4e08e20206f3bb44c0a55dd1e8672f7f30e3dd02d - TRMM_pr.USA.dc.DEC.png: 19bc176afea46b15c5150824466112e8defefc610f619f09f25176772d222b6b - TRMM_pr.USA.dc.DJF.png: 62ed048300f5ad5074e6452295b4a02911e4c2292251b275ac3164faee9316a3 - TRMM_pr.USA.dc.FEB.png: b38faf63d7c70d7dd8a23c39de2a20cab4f9ebeaa384e8a9b5ae1c0d36f7f75d - TRMM_pr.USA.dc.FMA.png: a2573daad3e39eb31b418f531c213e4f9aab76e13ebff12c3464c13c1c1e3c86 - TRMM_pr.USA.dc.JAN.png: de0eb6acdf7c0ed9f72574c8e44dc2763b2a247b4744e9fb2dc95ef0f22a3a3e - TRMM_pr.USA.dc.JAS.png: e2b65f519064c6a4bc6ae965caa43db396c4b7ad21af955f9f47cc857515ea68 - TRMM_pr.USA.dc.JFM.png: 79f0a6e61d71e389ecd4423456498c2f2005ae52edaf3b2605ac5c55314dfaea - TRMM_pr.USA.dc.JJA.png: b579db0e3e887d167b0987207fec8f5cf8192411327d99c2e0c812059bc69ea6 - TRMM_pr.USA.dc.JUL.png: 1a2914fe80cd6ac2f7ff7da01ce4fd51451689378b8427d280cccdb8f09a3281 - TRMM_pr.USA.dc.JUN.png: e70f3da85d0e7c12f647fd959e6a14b107a5f5f68ba3e09042bdf63488364254 - TRMM_pr.USA.dc.MAM.png: ea0ab303ae9224673a21f80aa22c3e55a47544366ba79ec181546d540bdec2f9 - TRMM_pr.USA.dc.MAR.png: 32bcb295baab39538db65a7f0f04cccd319095a7ce7824d739d3cbcdad0ed341 - TRMM_pr.USA.dc.MAY.png: 66d83b373934f75916410b1e7f1ac066d33961e004114f56e79b098bcccdd71d - TRMM_pr.USA.dc.MJJ.png: e3da10538906d3d6ff0db1af45a7de6c9c425acf130254b90081223b51223e5b - TRMM_pr.USA.dc.NDJ.png: 118e1d37bc22f58ceb224a358f905573f0114a95ee492f0d43e8beb4dd7abb27 - TRMM_pr.USA.dc.NOV.png: 7873c4d886e582cfb0a39a4fc6b6b21ce51afba3f02bba636e8234241d1b1c50 - TRMM_pr.USA.dc.OCT.png: 847881a5d055ca62d1e377f29590038b17742139fc4c34b11a1e14f9a80897db - TRMM_pr.USA.dc.OND.png: d5493d4ace31f38caa32204df09aff99d9a6d49fa276f40374a2257430f02b16 - TRMM_pr.USA.dc.SEP.png: 8bc7a5fc0852957649efa55656cd7fecf60295f3e9fee2e966fb82ef0c686246 - TRMM_pr.USA.dc.SON.png: d306c186f0f44468d9fef64932f278f74722b356f283d9e1cac866eec4457dca - TRMM_pr.USA.dc.SUM.png: 4b0490696b1353e8d2d2a89d82fa0135886c7165f9a61ea65d1371cca0901688 - TRMM_pr.USA.dc.WIN.png: 4846b3ec46bb9e1f31cbc7d63f2b427957ec6e084484adae094cf807888a5dba diff --git a/tests/checksums/checksum_output.yml b/tests/checksums/checksum_output.yml deleted file mode 100644 index 6b638363a..000000000 --- a/tests/checksums/checksum_output.yml +++ /dev/null @@ -1,324 +0,0 @@ - -# This file was produced by make_file_checksums.py and is used by the -# test_*_checksums.py unit tests. Don't modify it by hand! 
-# -MDTF_GFDL.CM4.c96L32.am4g10r8_1977_1981: - MJO_prop_amp: - MDTF_Documentation_MJO_prop_amp.pdf: 587a5193890cfaf34290ab655e2f894c - MJO_prop_amp.html: f0416e24f4c6496c528e914e7ab1865d - obs/fig1a_trim.png: 58e087b3f3bd6116d590de3532fec2bdd932bf4c64aec2bad4fada185e56e26a - obs/fig1c_trim.png: 71559e16d0f0ddb6bfe7ebd684bba670efb5a13babaff0fc7ba6ac50a8f7f5db - obs/fig2a_trim.png: 36944761bf295707f73ed054f33c01a2a6c9c175133624934d9a32aa93b19dc5 -MDTF_Lmon_GISS-E2-H_historical_r1i1p1_1951_2005: - SM_ET_coupling: - MDTF_Documentation_SM_ET_coupling.pdf: ac328925b00f213dad4168f1e75ff4f8 - SM_ET_coupling.html: 11a4cd68acf1187f8b2be969d5d3197a - model/corr_mrsos_evspsbl_summer_model.png: b1033f6795fa2c653a5cdc383234cb56989903c3713d7a578496b6a0a3d69de5 - model/corr_mrsos_evspsbl_summer_model_2x2_corrP.png: 91214143aa5df2b7db930c13152f6487f858c94c58fb39c4b45eca3f19775454 - model/corr_mrsos_evspsbl_summer_model_GLEAM_diff.png: 9670b61aaabdd38b7aa0a6c886c86be940ac5b3b3f15af97102ac63eeef5e9db - model/corr_mrsos_evspsbl_summer_model_GLEAM_diff_2x2.png: efccfc3c47893c084394491959aba307445c40bdb1e4659541bff6a8bf216eb2 - model/mean_pr_summer_model.png: 7bd54b68d8b671c4519a0b585cc53348c7fb17c8165779adbb8dc18b9530c9de - model/mean_precip_summer_summer_model_GLEAM_diff.png: 993654fe3e8eeb053cbfad6f022e638b00543ea1d92b78b1472f74cd093feb5d - obs/corr_mrsos_evspsbl_summer_GLEAM_2x2.png: c4233fe7a4ab3c260cb6c4335148c4ec78ab98775aa0fff01a99cd355d3cfaa3 - obs/corr_mrsos_evspsbl_summer_GLEAM_modelres.png: ee2eafb199b9ee4140a544d90c90ae4a2b60cded949ed768caede95a56e7d7e1 - obs/mean_precip_summer_summer_GLEAM.png: 85cbe28ab839fe52707f8326921bea1d5b7878202390f05afbf6c0777dc2ed37 -MDTF_QBOi.EXP1.AMIP.001_1977_1981: - EOF_500hPa: - EOF_500hPa.html: d3f1a793802fc0699c10eb6edf445a04 - MDTF_Documentation_EOF500.pdf: 2453dda04de9e10c903b38f076461b7b - model/PS/QBOi.EXP1.AMIP.001.natl.eof1.ps: 663abb767062d8563658ec321427128c7bd73def0417ff5e5d2abd56d2e2c8b1 - model/PS/QBOi.EXP1.AMIP.001.npac.eof1.ps: 6d6f859906d2d49bbd36e71eff07ea7b578cb0d173456678b79e5db4ce008d4d - model/QBOi.EXP1.AMIP.001.natl.eof1.png: 18d2a2624deeddce5de5da2e5f21fb3183b42ccac43773dd3d02eaabcc20c6cd - model/QBOi.EXP1.AMIP.001.npac.eof1.png: c33af043697177bae5f07ea7a9592e95cbb8a0b7a5e9ae7cbd6e87eb684c47c1 - model/netCDF/QBOi.EXP1.AMIP.001.Z500.ANOMS.nc: 3b576cd5b56ed8f6996a07f38f194bba - obs/NCEP.natl.eof1.gif: dda76e56c9025c8786d498c23263a37968b9e8ef5dabbf88eb70a9f57bcd5d17 - obs/NCEP.npac.eof1.gif: 8724e271faa1fba6e1f27c4308d7b70a34ae2ac20ef5815f42d167987cfe62bf - MJO_suite: - MDTF_Documentation_MJO_suite.pdf: c1e3f079b127f88237922700b4ffd713 - MJO_suite.html: f461c8e863d5fa84d122186fcb1e383b - model/PS/QBOi.EXP1.AMIP.001.MJO.EOF.pr.summer.ps: 5ea4733500bde4018acf688ab0b7e254a22bef9b1757cf5e983292fdfb9bfdec - model/PS/QBOi.EXP1.AMIP.001.MJO.EOF.pr.winter.ps: 9bdbacac137f75677c3b99f11475dc7d331c3de6ba232461c67036647fa36ec375822a6b9b38e97db4d57feffe82bdd28f2eed08fff2540ccef85b702d821da6 - model/QBOi.EXP1.AMIP.001.MJO.EOF.pr.summer-0.png: 06674f74890ad4df96965e6df1ce4ce7f4bfe2f1355e921031f775acbbe62363 - model/QBOi.EXP1.AMIP.001.MJO.EOF.pr.summer-1.png: e7c1d4c0e03df95d327976c6f8b1d2cb668eb6c19edd977e5a372f758c68395e - model/QBOi.EXP1.AMIP.001.MJO.EOF.pr.winter-0.png: 46fe9bdf0d19cb291341daf137dee5ce46f0511f4bb6c1b6a2ba263758a4f43a - model/QBOi.EXP1.AMIP.001.MJO.EOF.pr.winter-1.png: 26eeb7bf61907e9e0f31d16a72ccfd67fa4364e6664705fcc27e300814d640c1 - model/QBOi.EXP1.AMIP.001.MJO.EOF.rlut.summer-0.png: 
5d8ad52f3b44919fbc875beefea08dc1f2d5d676d6c5d7d044241c9644b41d36 - model/QBOi.EXP1.AMIP.001.MJO.EOF.rlut.summer-1.png: d8b8df1ffbc083e1abe019f3ace782a06f8d4d160550eb3b5d98c7ebf2f87655 - model/QBOi.EXP1.AMIP.001.MJO.EOF.rlut.winter-0.png: 92fdf5dd5f06d398088a7278cc0e6e4c474a4f7447bb97f648827b787660fbdd - model/QBOi.EXP1.AMIP.001.MJO.EOF.rlut.winter-1.png: b5345cc7c045acd94eb724f85b43b3eb141019d291f32852ed9c90d0b8308548 - model/QBOi.EXP1.AMIP.001.MJO.EOF.u200.summer-0.png: 618f1e0bad68c3a668fc1aa0829693d17d800cd3ae734514a0dc5e06f4404b4d - model/QBOi.EXP1.AMIP.001.MJO.EOF.u200.summer-1.png: 1fd11da4152f0d2f177e5b1a435071f236cf4a23a6191d67ba13f4172d2159c5 - model/QBOi.EXP1.AMIP.001.MJO.EOF.u200.winter-0.png: 30da9e36479fb882875d230595302e0bd47be1f0ba0d855765644753359666ea - model/QBOi.EXP1.AMIP.001.MJO.EOF.u200.winter-1.png: e30ba3752f8befe07b5630d552c41c7c3c1253518d1c55c7cdd4162222835984 - model/QBOi.EXP1.AMIP.001.MJO.EOF.u850.summer-0.png: 6d78fee9271d56fd7b01834d3deba37b79dffc212033d451218fe597120ac3d1 - model/QBOi.EXP1.AMIP.001.MJO.EOF.u850.summer-1.png: 349a6a27d77926fc7658d472bf5f1aa613dd375ebe57bcbe4e1869edd0374d9d - model/QBOi.EXP1.AMIP.001.MJO.EOF.u850.winter-0.png: 2ef6533f51a3dbc18c38f2303338ced313f4d024433f0678802ecbc15f7d1ab8 - model/QBOi.EXP1.AMIP.001.MJO.EOF.u850.winter-1.png: 56720aa66792ffbb5f96084bb33ff6307b73968805d2775a72492ddb0358c79d - model/QBOi.EXP1.AMIP.001.MJO.lag.lat.lon.annual.png: cf7db5e791d965523bef30064974d2420887e64e9112392d779d3d94b3603072 - model/QBOi.EXP1.AMIP.001.MJO.lag.lat.lon.summer.png: 31a298af035b119cf2a6bf4684c958ed044cdea312c6c3e6ee4d2e60511f4848 - model/QBOi.EXP1.AMIP.001.MJO.lag.lat.lon.winter.png: 5f177287f534c8639834c1a9189f38737b5a1ef3bf95088cb628cc27b27311ce - model/QBOi.EXP1.AMIP.001.MJO.life.cycle.summer.png: ed5ff81711ccde7a2b921c4be72b7a76091c21dda1755e0c18dda06720178733 - model/QBOi.EXP1.AMIP.001.MJO.life.cycle.summer.v2.png: 7e01e22c35298e1abd48e4b74bb6229fb057dc956fdf22ea6ad76046255d89d2 - model/QBOi.EXP1.AMIP.001.MJO.life.cycle.winter.png: f4bd2c174e404ac793951cc0de599e030e42bed63215195a4105affc5bb770b7 - model/QBOi.EXP1.AMIP.001.MJO.life.cycle.winter.v2.png: c6aff365af82e44b1179870563ac843789d7699f336b236e8b7e022f7f0ab795 - model/QBOi.EXP1.AMIP.001.MJO.png: a5f162040c162568d95b799dc118c3c2847b5326d7fb975bf302828dffb7f6d0 - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.pr.summer.png: bc5637e82ae13c7e9d626f4e49f937d7d8286e15e8a5a009214905bec71c8343 - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.pr.winter.png: 60e6d0aa032ffcc514124914b97293ea6e5da77838aac97a10889b830b12d7be - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.rlut.summer.png: 38331cb920677383d42b0ec384b59f0248c02f5d934678f6da1e47dace751a14 - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.rlut.winter.png: fb736bb36837417db430593826d777a56f61a88c995bb672def7207fc736e6b5 - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.u200.summer.png: abcc801b37c61942fdc27babd90e497dba0c63df5bc7272bcebf1000273b1231 - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.u200.winter.png: 598b54952b369ee0caa85b95df19267e5ec108e0273eb213f8152f41d9f437af - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.u850.summer.png: 913df428460bc9a594bf8b6a597f8f382e3587ded3eb6b3646c99ead9433b44e - model/QBOi.EXP1.AMIP.001.MJO.wave.freq.u850.winter.png: a1d7bfd3e4975fe005b00803aec058a76b0b1475704b46fb8373dbf04c51f8f0 - model/netCDF/MJO_PC_INDEX.nc: 9601086c1d7b1aa7dcd4e281e9eeea69 - model/netCDF/QBOi.EXP1.AMIP.001.pr.day.anom.nc: 6a35dc558c5b20355cad88a6aff3e3d4 - model/netCDF/QBOi.EXP1.AMIP.001.pr.day.nc: 21a3609f2f8d4340b77c41a504130ef5 - 
model/netCDF/QBOi.EXP1.AMIP.001.rlut.day.anom.nc: 84802a969605458f1b9b27a7274e91c0 - model/netCDF/QBOi.EXP1.AMIP.001.rlut.day.nc: b9925851f500b953308f5a9bb56bea44 - model/netCDF/QBOi.EXP1.AMIP.001.u200.day.anom.nc: 8b2726312210b6f87a304851c4d6d19c - model/netCDF/QBOi.EXP1.AMIP.001.u200.day.nc: c75cc327b7d4ecbc7310c9fc2de1387f - model/netCDF/QBOi.EXP1.AMIP.001.u850.day.anom.nc: 97c2e829b9f6f80925e7dad5c55b707a - model/netCDF/QBOi.EXP1.AMIP.001.u850.day.nc: 8cb4d47545c29d515817d27e1d7f450f - model/netCDF/QBOi.EXP1.AMIP.001.v200.day.anom.nc: ac1990f668e98131a4a46221aaae708d - model/netCDF/QBOi.EXP1.AMIP.001.v200.day.nc: 03106f2142d86bbbe8e25495bb6174d9 - model/netCDF/QBOi.EXP1.AMIP.001.v850.day.anom.nc: 9cc4cef13afb52faa3665fb52a833284 - model/netCDF/QBOi.EXP1.AMIP.001.v850.day.nc: ef642f0e851f58afea37202e7f6a6956 - obs/ERA.MJO.lag.lat.lon.annual.png: ea02eed6a1ae8a768ccb40ac5cd930294518820e8ac974f320a1a13af281a674 - obs/ERA.MJO.lag.lat.lon.summer.png: 3622d8853fefe9660f22aed1a2e7174b65a423e2cd2713bba3b7b377ac02ea6c - obs/ERA.MJO.lag.lat.lon.winter.png: 5dce3d5b37d52ad160af63659f57cf6b33e5690468dfe033213218b4ef772fa2 - obs/ERA.MJO.life.cycle-0.png: 67fb0fc13ad1bdc5f545ed56dbc6844430764219dea5b5c98cd679b44ff2a1d8 - obs/ERA.MJO.life.cycle-1.png: 3fc7aff7ec86e19a99a24909cdf95352710bd988f32e47d0c4233eb3b2fdb49d - obs/ERA.rlut.EOF.summer-0.png: 178e48aa2a786d8777aacff0c6f96068506a7d617065e19c1dcaf856b2eafdfe - obs/ERA.rlut.EOF.summer-1.png: 93de27daa1fca2ca9cf95177a06765a86620be37be7d4db6d724a1a74a4c9563 - obs/ERA.rlut.EOF.winter-0.png: b9a7d90dc4b56a561e668090e6246c5b0aa59a96f1629168f88fa16115d09101 - obs/ERA.rlut.EOF.winter-1.png: 68085db956fda105cf16c23df1a6384d2fd6110b373920577c33c3736ca94fd7 - obs/ERA.rlut.MJO.wave.freq.summer.png: 07f397ac48a6bb818348d4609a87f14e0b8e7510d9cef52888942b415203a33c - obs/ERA.rlut.MJO.wave.freq.winter.png: fe6cd04d8db20cd64a4a00a29bd62ec74cba15a4e0aa48f2f0506357ab50ceed - obs/ERA.u200.EOF.summer-0.png: 482f55cc1de7284feb9a18a89c0ea9eb81cb1d4d493c937e973aba7d1b657b75 - obs/ERA.u200.EOF.summer-1.png: 79233844b5d9c68198daf1af17ef07f79e93fd2a0bba7233a5a29025920a5c9f - obs/ERA.u200.EOF.winter-0.png: 3ca569dae0ad2d51c319b684f07d322f94431fff29eeec01fa6cf10362b837b5 - obs/ERA.u200.EOF.winter-1.png: 150b029ff62a8bff3de04c3779cf1dc805925e335d4fcd5c191a3bec459e08cb - obs/ERA.u200.MJO.wave.freq.summer.png: 304f5327db9ff66dd6d35968021d4fe7e068e6f1c1c0f49ae82e0bf1c6c8383e - obs/ERA.u200.MJO.wave.freq.winter.png: cc3772bd5a818b66bd47e273bda5732d12436f2420c8acb5dbc4d88ab468237e - obs/ERA.u850.EOF.summer-0.png: d2c1c25018df5f0c24b8f57c17a655685b31d3648c2993c5cbca0a5f453a6660 - obs/ERA.u850.EOF.summer-1.png: a384fe9370304410976632f3f0ce2cc9221f60903430ce182ec0d67d67ff6171 - obs/ERA.u850.EOF.winter-0.png: 5dada9c5058aa18ff95d6421a7cbb3b891f52daa8007c3949a42a8141fac14fc - obs/ERA.u850.EOF.winter-1.png: c4f1da45654271e0c5242a46f1261c9954c7ff322b1a56f8dd53f62fb47ccce0 - obs/ERA.u850.MJO.wave.freq.summer.png: 9e46d3e54809f911a6d49306efa929cc2d73d6463e23bd4ffe3d862c647c6bd8 - obs/ERA.u850.MJO.wave.freq.winter.png: d768fcdb525c0066993f21cc896c1c2ef9ea880bc38635e2bc18d19a6e36575f - obs/ERA.v200.EOF.summer-0.png: 7de16a33d8a216c6b222fb3fa9b7a6192060ce5507cd09030faf86595ef48f17 - obs/ERA.v200.EOF.summer-1.png: 7159eb4facf6d60511ef52b9f42f7d84ecf9135fab9e00e325d2454973076a96 - obs/ERA.v200.EOF.winter-0.png: 638da40564b33f8a21dc339e9b1a8a21903f7db11ec643400ecc7756944b6a1e - obs/ERA.v200.EOF.winter-1.png: 2289b83975dd2a53a71625c2b981ab78d9809c6a48ae6d0fe7d2e4fa5278a777 - 
obs/ERA.v200.MJO.wave.freq.summer.png: f00d915922ba63862d8298023ff64cb2bcb446e19ca7b831f035f8594935b246 - obs/ERA.v200.MJO.wave.freq.winter.png: e11a40e5b7cc92072f3dfb02a63847cf6951179d41911db0eb3b753cfea0151e - obs/ERA.v850.EOF.summer-0.png: cef5a129d83095b204cd9586180f4dfa03deda00a0dc5733e334145c93a7d15c - obs/ERA.v850.EOF.summer-1.png: 7cee81de8ef13e2a098a6da79e8eefb4ef7f62f54dc388309c73e46f604716da - obs/ERA.v850.EOF.winter-0.png: d29cf03d9280c4fc40821022c3f5b21fc187eb542e7382096c1379cbbb1f09cc - obs/ERA.v850.EOF.winter-1.png: 8668981f42e40964dc50cd2031ebabe5c29ce6ae221b07c2a8af86171286991d - obs/ERA.v850.MJO.wave.freq.summer.png: a127655ccbcefdac91cf8f334a5a369b77a75a58da26f9e4b77a3d4e5895ef5d - obs/ERA.v850.MJO.wave.freq.winter.png: 7485208c0420f3eb04c279de40ff152b72304ba5e4d56de1211a46e8e6c0b90d - obs/NCEP.MJO.gif: 612a2f25a09dfb0f0a6bfa584faa3d2a36dedf8cfa2a18155f23bf1feeed2d98 - obs/TRMM.pr.EOF.summer-0.png: b0f0a81ca3c8da773b53cb5574e3d6e74c99a7d52156ca41d9e5eec3e608d7ac - obs/TRMM.pr.EOF.summer-1.png: d18f9e35c415201f46e3cacf0fc3e0f3a035d8a47a75b97e2653a1ef31e26a0e - obs/TRMM.pr.EOF.winter-0.png: d0b937c97e3dda4aa34f39d02ab3c7f2c1bfff530d3dc7e77a2a0e8e2780993a - obs/TRMM.pr.EOF.winter-1.png: a1a7be82a9d3c719e35f363fbd0dae5b80c9f45e3ecc1a2610019d491e4735a4 - obs/TRMM.pr.MJO.wave.freq.summer.png: b4a09f8d10dcc85a7924581d522c1c9c5a86676366fe172b6bfb52e98ab968d0 - obs/TRMM.pr.MJO.wave.freq.winter.png: a9bc67a7710eb08b5c6a058d9814ed3deb73f6026a559ba75399488480b20b1c - MJO_teleconnection: - MDTF_Documentation_MJO_teleconnection.pdf: 8bc7d3f3d130bda3a165993484b8669e - MJO_teleconnection.html: 8c9031862d015f8b72245e60d275d8c8 - htmls/figure2a.html: 17c61b4f647902c366b633f72a306639 - htmls/figure2b.html: 863c5d52f3f08e182c4e4cf8bdfe1e89 - htmls/phase1.html: f08afa86761687c9eb52fa1149687918 - htmls/phase2.html: ffddf8cb65d93bb46eacb748fa82e976 - htmls/phase3.html: ea495c7939514a8a33336535c46f51f2 - htmls/phase4.html: bf686858c8b272794ea776d3d735d554 - htmls/phase5.html: fff0e606ab59123ec76e80dd14aa056d - htmls/phase6.html: 9075b595eb4643be6389f71318cbf51c - htmls/phase7.html: 1099c16e7bac8a79f843df46f63ff986 - htmls/phase8.html: 1ba51ba3f6cccc0be60e188a272ea56e - model/BSmetric_goodMJO_250mbU_Pacific_pentad0.png: ec58eb8f0c5260006a080ab8c36dd72441763454f9843bfbc1ae1fd8977314cd - model/PS/BSmetric_goodMJO_250mbU_Pacific_pentad0.eps: cc1b5e5f63843ea2086de01e7e3f6e0c65c2d8faf3da51a29d11859f429b2083 - model/PS/precip_hgtP_1_Pentad0.eps: 6162968e1d9eb0e8d7f26dcba401e957698147adf84794f26ea3690540181f2a - model/PS/precip_hgtP_2_Pentad0.eps: ec83404120e1243317b95feb7ec2516f77080f66f7633bcf7bf6de5c0f739b2e - model/PS/precip_hgtP_3_Pentad0.eps: c945ce308c95ae9b16ae134dfb82fe4b373f83c7917877c3bb6141521a4dec60 - model/PS/precip_hgtP_4_Pentad0.eps: c7133fc173af6a8d3a6dc47d63ed624d789b60c094c7aac036bf99065c9f38f6 - model/PS/precip_hgtP_5_Pentad0.eps: 913443d40dc355562c064d8d0abcabba7a2819ba0fdeb299e6fa69acc3f15c88 - model/PS/precip_hgtP_6_Pentad0.eps: 36aefd34b8c3bd10010047fe8d19a27b7ecbcf40aad8f01d806df8f1b2ec76a4 - model/PS/precip_hgtP_7_Pentad0.eps: 426a1363a68f48dee6c46e5fa78de06d63270fa5636f59ed17055225baec422a - model/PS/precip_hgtP_8_Pentad0.eps: 4f6c80519b701ff5b8c71c8d7aaa75406a429d59f74e1c209623ca997a1fd1ed - model/PS/wvEWR_MJOav_250mbPacific_updated.eps: 54d71bf7c82c49fbb92a461d53b2db021da7d6779ace3a41f5c25df230616d05 - model/netCDF/EWratio_wf_CMIP5_updated: 0eabca9e0a277e0224e731cb55ce7c4b - model/netCDF/PR_composites_QBOi.EXP1.AMIP.001.nc: 53e9e4f2347cdecb9388d6f1103fd735 - 
model/netCDF/QBOi.EXP1.AMIP.001_RMMs.txt: ccea7af9ea9a7b6bf8b381f5da2b6204 - model/netCDF/U250_RMS_jetext_updated: 6237bf0c58a6219ff6b222e9a28531b1 - model/netCDF/U250_RMS_updated: fd7990f73b18749994e96b46b93e3700 - model/netCDF/ccr_Z250comp_CMIP5_updated: 01ab3b91a7b49824a8577677e8eec13a - model/netCDF/corrMJO_Pac_250mb_QBOi.EXP1.AMIP.001: 159bb75e06d1a796313109213dcc2e36 - model/netCDF/geop_compositesP_hgt250_QBOi.EXP1.AMIP.001.nc: a21f30866c9f92f73627868098812d47 - model/netCDF/tstatP_hgt250_QBOi.EXP1.AMIP.001.nc: 30dc3c3c867f0753145056368abf63a2 - model/precip_hgtP_1_Pentad0.png: 0cbfab15f2a7a4a992007ce6c9e25ccbe5b44990a6da1aa5668c605b339aca00 - model/precip_hgtP_2_Pentad0.png: 9ebd63176e99cfdbb2de5bb9f9aa591c400d9d47bb58c545fb2b69ea95dd3498 - model/precip_hgtP_3_Pentad0.png: b2688ec7e6115c3f74ac04c10ec5122c9a01ebd533688621442bf360223a3ce7 - model/precip_hgtP_4_Pentad0.png: de4313b406172d1cbc4723deca02abf60641c211a48154a0c8ffc2151ffc61b4 - model/precip_hgtP_5_Pentad0.png: af1863c5295e119412ecd521ffd2d2d1c1c5ea033203daeb37d7aa5f6ddad4a3 - model/precip_hgtP_6_Pentad0.png: 11f67d4dd19fdf2fdfe1ecde8c79f140540bc90f33ab9e915c3e9612952c4621 - model/precip_hgtP_7_Pentad0.png: 345438f927981f51ac9c8b3c6ae3c28df3da9af67068010ddd27efd8a96baff7 - model/precip_hgtP_8_Pentad0.png: ff314a1224efc43d5445956b46c74b339b59170273edf0d880885bb251868dec - model/wvEWR_MJOav_250mbPacific_updated.png: 52c124aff2961fb4f436656f1e6839912f7666883f1684d05fe785540b3f514e - Wheeler_Kiladis: - MDTF_Documentation_Wavenumber-Frequency.pdf: 216fbe72f8ab6aeb7f455de40f4bb129 - Wheeler_Kiladis.html: 2895b310736a52d2dfba2dfb00c07ca9 - model/Fig1.Asym.QBOi.EXP1.AMIP.001_omega500.png: bd5430d3ccd8eeb9f5cdac86c5296a927021cccfa14d7d4faca7b5d9c53e33ff - model/Fig1.Asym.QBOi.EXP1.AMIP.001_pr.png: b592925b135d63e5c972be60bc0820f23fab7d23538ed5c980897a62d35bc69f - model/Fig1.Asym.QBOi.EXP1.AMIP.001_rlut.png: 171274b09930ebdc3ab2367472e2f7524e67bca48ef490cf4bbea77ee67189a8 - model/Fig1.Asym.QBOi.EXP1.AMIP.001_u200.png: 766b765a97078955455611d3325dd36ffcd9baacaf05a1de6f95db3f51e412cc - model/Fig1.Asym.QBOi.EXP1.AMIP.001_u850.png: 41490d022d371161d7e58259e43591527f389c09f8edceaf1cfd10c0096c96a7 - model/Fig1.Sym.QBOi.EXP1.AMIP.001_omega500.png: fcc469bfd488bf4b667ba192e7ccd1eabc9b47ac95f294d2195f93ecda448163 - model/Fig1.Sym.QBOi.EXP1.AMIP.001_pr.png: 661c67d86612b7bdd90f1249bbf04d17e4347088decc13d04effd53ee843357e - model/Fig1.Sym.QBOi.EXP1.AMIP.001_rlut.png: a38ea7b361b9b7844cd258bc20ba649cf3e7078861b2a1c53c81b1ff5a5c2c7a - model/Fig1.Sym.QBOi.EXP1.AMIP.001_u200.png: 6b1a70309257c410d0785df7a8690e8da6205785e560ac40cfd92386afea500f - model/Fig1.Sym.QBOi.EXP1.AMIP.001_u850.png: de57b370fda8cc2c277cc2cb6b3e49daa8c1276f3157715514bc06fcca69a18c - model/Fig2.QBOi.EXP1.AMIP.001_omega500.png: 8088140fb6f2ba8ce3ce5c8bb85c42d7989a3c5ed0a60e4d4b8f6b634fb8151a - model/Fig2.QBOi.EXP1.AMIP.001_pr.png: c91ba8f6379d80a08180593d87df2683cb0a674b9a907fbd54898ff276a4205f - model/Fig2.QBOi.EXP1.AMIP.001_rlut.png: e76711b10f385f33aaf3a26389ea6f18d99416c82a868de8c36dfa639eb86be6 - model/Fig2.QBOi.EXP1.AMIP.001_u200.png: f32ddf80e708efb876ea56dd0ee4454a85bc5e3dc2c7814697e8cc2bef7b3841 - model/Fig2.QBOi.EXP1.AMIP.001_u850.png: 6c54111655dfcbbb01d389af2b847be27c539e537e0b3dd4fab9ff84f651abd6 - model/Fig3.Asym.QBOi.EXP1.AMIP.001_omega500.png: e08e923722875e34c5c1616f54e2e665abeea8fef7cf51a4907e049ce87c1f20 - model/Fig3.Asym.QBOi.EXP1.AMIP.001_pr.png: 336d400a113ac3ffa225f184c982aa2dc85f5fde7dabac71985ddd3fea63ba5b - model/Fig3.Asym.QBOi.EXP1.AMIP.001_rlut.png: 
7edc0ca0d4d43c11585eb879323fabdcf2b4b89ac007a3321f627dbfc2e3a537 - model/Fig3.Asym.QBOi.EXP1.AMIP.001_u200.png: 4e11e4f5ee0880882c6d61fa983e4638a5ab9c6a9d381fe5affd0061d7af6828 - model/Fig3.Asym.QBOi.EXP1.AMIP.001_u850.png: ee6bf30f4ce69d7c8422edc17d70a51e9d2655920a7a451dcbe781c290f97694 - model/Fig3.Sym.QBOi.EXP1.AMIP.001_omega500.png: a7d4de052bf3d9af95b15fc3d0c53458be304a5d06e7df6bd9cf684ce765b128 - model/Fig3.Sym.QBOi.EXP1.AMIP.001_pr.png: daf71519b6ff9d0a34afbe7863bfd898c02a1ed53b6f46258e07d97547f73d11 - model/Fig3.Sym.QBOi.EXP1.AMIP.001_rlut.png: 44c1829b83622a8cccbfd4eefd445d1ad06dc5bd463f50aab276cec8802b62d9 - model/Fig3.Sym.QBOi.EXP1.AMIP.001_u200.png: ac2959bb0ba78c38ccffe2837879c5ef3dcddee83fa6031657816fe57b57f963 - model/Fig3.Sym.QBOi.EXP1.AMIP.001_u850.png: d5ab6698dcba45c1aa3ad4e541957a33ad6e4891431b2113a254596c50fbf9c3 - obs/Fig1.Asym.TRMM_pr.png: d846c39703e1ce307b72c8665a40ea3f57462f02ef3c665e688ef0344ce9473f - obs/Fig1.Asym.omega500.NCEP.gif: 005c4c450926750e5bc828cf900291eee7c5fe8f007a2027e0908522a6777c42 - obs/Fig1.Asym.pr.NCEP.gif: 0cda044f1467b888a254f876385569e4afdb6f9cd0f0f0260a48bdb15e56be30 - obs/Fig1.Asym.rlut.NCEP.gif: 9ef8a48bcbd7c572031af008991865208e8237bf5c2510d4c171513181c82783 - obs/Fig1.Asym.rlut.NOAA.gif: faa0ab3321864019893e939b14375c0c40f03b6952dbac8538bf5fdec75a0a94 - obs/Fig1.Asym.u200.NCEP.gif: 852635cbe278b5fc73a90e8518f6d8050b997d656bd8578464cf0e9a7b98fd33 - obs/Fig1.Asym.u850.NCEP.gif: 6de677ded72a40037b82956e0047d3ef753e450e9c6442f13a1de4053f0b6fc3 - obs/Fig1.Sym.TRMM_pr.png: ba4e03c25d7ef73abaeed07d5c897496dbbd36eadb53e441a51075cc0b6359de - obs/Fig1.Sym.omega500.NCEP.gif: 70486b40b13f6aa3dd619283850472d6ae17457ff22aec02a12a83a0f02331ff - obs/Fig1.Sym.pr.NCEP.gif: dce0e720fb678fc8d0aebf6676330c33f94d6c473b5b9ddc320c8fb3cb55fb47 - obs/Fig1.Sym.rlut.NCEP.gif: ec421bb315c00d67633f7b74ce186a6598e7518fa1c6912b8e3d561fb3f108ab - obs/Fig1.Sym.rlut.NOAA.gif: 274e699b124cf0287c96c3a337eb0ddea54f52aefe40df90d534b1f87d115a4e - obs/Fig1.Sym.u200.NCEP.gif: 2b873178ef0fe5351f9f8fc9526d5dca4b2c23f963b877a333288c8cf8bc3570 - obs/Fig1.Sym.u850.NCEP.gif: d0b09837501e77c348c1a3e4a65c1c045378353da805b8f7a8839fc13bf72da4 - obs/Fig2.TRMM_pr.png: ecc54010f244118ca758e4d7eec519e1a559bf8e34a78086f9a5bc8f0c953116 - obs/Fig2.omega500.NCEP.gif: 23482d48aa8255f9435731c49a0110237f6f810c22ce23a5cacbed62fe984ba7 - obs/Fig2.pr.NCEP.gif: f8a64fc27d6337537a7226258b9f95a0962f8886e723e550878f0832bcfabe1b - obs/Fig2.rlut.NCEP.gif: b4b70c4b608641e408627d6f8f8329b4902ce48ecdb899f472a6b8ce6ba9dc3a - obs/Fig2.rlut.NOAA.gif: c7eadc3dbc7f613ac22fe7a127329a9d2dd2d85d834c82e229870adef7df0d66 - obs/Fig2.u200.NCEP.gif: 952c0e23288e87e5a8c7783858b757aeccef5894f0341da90db1673a7a1b9760 - obs/Fig2.u850.NCEP.gif: ba88920fcf6bcf9c2218d6217e056836b4b0a3dca1e84a396ef013a32b5e3578 - obs/Fig3.Asym.TRMM_pr.png: 6bf1bc66e69f7c9a3bac397eabe07feb1f7fcadf431142fb6986a6e45a245ea7 - obs/Fig3.Asym.omega500.NCEP.gif: 077cea6e095b5c515ce520949488448cd689be376d894844233244b54bfe14a7 - obs/Fig3.Asym.pr.NCEP.gif: fd5f340253ea03aa4feae12f164336ddbf130673790e3ba1ac618f35e899b7a1 - obs/Fig3.Asym.rlut.NCEP.gif: fea21a7fcc056d85320164539633549b73a41c44210b0e44f4a014deba872716 - obs/Fig3.Asym.rlut.NOAA.gif: 859b57f77a0dfcdcf30e042ca0c0756c0dbb88aac567bc58259010fdcba65a1a - obs/Fig3.Asym.u200.NCEP.gif: 9c4249f3159b572ae926c029cc052ea0b39b852fb430391a3c0b1151836e6c97 - obs/Fig3.Asym.u850.NCEP.gif: 5950f27375f8db391767f79e42499f997e68a8430bcbe656b55a6d8e7b435713 - obs/Fig3.Sym.TRMM_pr.png: 
518f31b248b54c760b8e5536710f14f9333a94c398d63ed65f9e66d2e3409e1a - obs/Fig3.Sym.omega500.NCEP.gif: 51cf9b428410803ab51926d990d29ba7af53cbcfe25c9578ec084ea256b150b2 - obs/Fig3.Sym.pr.NCEP.gif: 30fd949403a47233276506647f9c6c74a3fb704869f28e408fc65218e3719f9f - obs/Fig3.Sym.rlut.NCEP.gif: 805d1bf44162b2d8d6904a1007fb2e14dc80c561e2020e3c3502078cded0408d - obs/Fig3.Sym.rlut.NOAA.gif: b0b0681e7f2660a5fbaad9f4190ba878f11a83382c17a07a04e687bb12265ca6 - obs/Fig3.Sym.u200.NCEP.gif: d731e4d921249dfe378bb6a1beedf187daffbfd44525ab7811c4fcd0efc65de6 - obs/Fig3.Sym.u850.NCEP.gif: 34623eed42e3243e5c17a6425e6f2a8791f9e3b8def37419ced7a17d09d44149 - convective_transition_diag: - MDTF_Documentation_convective_transition.pdf: 5ac9c220e2980da4be118dad1cc981ee - convecTransBasic_calc_parameters.json: 783786a95642e0e29a681d9a3360762d - convecTransBasic_plot_parameters.json: e18b12c61b25cc225c875e464901445c - convecTransCriticalCollapse_parameters.json: 9dc216e36256bc925485d22824cf190b - convective_transition_diag.html: fcc8805a20e419af67eef07dd521007f - model/PS/QBOi.EXP1.AMIP.001.convecTransBasic_qsat_int.ps: cbaf907acee15c53f94f86be791ea5c303a7a6d31e805c214163253073e18499 - model/PS/QBOi.EXP1.AMIP.001.convecTransCriticalCollapse_stats_qsat_int.ps: dd9cf857bd78f21156bdb9e55b9f2493d3ef16c7f47ccc44d5804376b6801c4e - model/PS/QBOi.EXP1.AMIP.001.convecTransCriticalCollapse_wc_qsat_int.ps: 76133494f8f53ad3798b9c62610c88d2808a033749c52a8ec137c42a6017a092 - model/QBOi.EXP1.AMIP.001.convecTransBasic_qsat_int.png: 1f8e9efd2990c646b23142580307eed7561ca4374a6bdbcccff67beb2a37060b - model/QBOi.EXP1.AMIP.001.convecTransCriticalCollapse_stats_qsat_int.png: 320580967701340e47c8c4510a42a4425437e8c928dffe7cf369ea5b1f5e49ce - model/QBOi.EXP1.AMIP.001.convecTransCriticalCollapse_wc_qsat_int.png: 59549af0c69bb2520e8ed698cd3a9f8239579627cd76a0267a1945bccb7fdb98 - model/netCDF/QBOi.EXP1.AMIP.001.convecTransBasic_qsat_int.nc: 2731a8fad1a6ea091fab2736b5a4b204 - obs/PS/convecTransBasic_R2TMIv7r1_200206_201405_res=1.00_fillNrCWV_qsat_int.ps: f5a726d841b15f89af5061fc1ac93fe333ca82b92fc3ef929e59e2ece36bb41f - obs/PS/convecTransCriticalCollapse_stats_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_qsat_int.ps: 0afb1886c6e80327d0c20caceb32854464f96058d6ec2ad6a96531c53bc9bbd1 - obs/PS/convecTransCriticalCollapse_wc_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_qsat_int.ps: 0a7185157e8f8aecbbcfd0ecf436a5e257f11147b5c985cbc1973b1456e0c936 - obs/convecTransBasic_R2TMIv7r1_200206_201405_res=1.00_fillNrCWV_qsat_int.png: a3a660dc8569920cd7d48fa80c624e19c761484c1d43ddf38a354f9bf7c54718 - obs/convecTransCriticalCollapse_stats_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_qsat_int.png: bcfa919cd8f8f96f8e363e472cbdc6ec98ea362ae01be9e2b52ec183a4cdfd96 - obs/convecTransCriticalCollapse_wc_R2TMIv7r1_200206_201405_res=0.25_fillNrCWV_qsat_int.png: 24337e5199d4c7a529f3e96dcd0da856fda18e678000b3a5b47b9741126d4d65 - precip_diurnal_cycle: - MDTF_Documentation_precip_diurnal_cycle.pdf: e3232575ae13691806a859a61a578bbb - model/netCDF/QBOi.EXP1.AMIP.001_pr_DJF_adc.nc: 80100833c73fda93cb65119610d34007 - model/netCDF/QBOi.EXP1.AMIP.001_pr_JJA_adc.nc: e6786347552d6fdf2b2938a6863333bc - model/pr.50N50S.dc.ANN.png: 93ffd1948bbef40cc5daad75a774c83caf46075d9199838ee945f1455e5fd0e3 - model/pr.50N50S.dc.DJF.png: 191aa7d47dec15875212470d9744c3bf2b5e0702d2d18e4f17f82c7fd1fc67e7 - model/pr.50N50S.dc.JJA.png: 3d1c38dde6d0c66c8964b846b95945d532b683ea00f3fda62c810bd5777c264c - model/pr.50N50S.dc.MAM.png: c683bf2a1f44eef4af374aba85046a1dbd5be5f2e70753fc3aa7c52cba56a663 - 
model/pr.50N50S.dc.SON.png: bdfb2a51abec52737cb5c3586a3595191bed0f4160f810febbb7c64f086839ed - model/pr.USA.dc.ANN.png: 5432f93a98505f130119dc4c0b52911e25472d635996c93a1f65e6af48ba2d31 - model/pr.USA.dc.DJF.png: e1e5e75879c6cdee087b379afbba06e08e31237b7f5b6bd16c1d5d2728e05132 - model/pr.USA.dc.JJA.png: 13f6e1ff9de0896e4e520e20f7fce0255328f722058bd4366e9a836d14a17950 - model/pr.USA.dc.MAM.png: 77bf201298a986d45d2cffec22ad76113ffc0f05427297b4a82be4599f29f677 - model/pr.USA.dc.SON.png: b880c8a5cd78c89766fd42e75ae5306ac1047cbd80a3e3c43441c3c816df1d00 - model/pr_DJF_dc_regions.png: 401d82c865fc63722dd0036b166ed056f583114f939c67b500ec9f594ee10ea8 - model/pr_JJA_dc_regions.png: b45f61931f10f1b8462e6614fb74e4c83010d2cccbe6b8941cead2427d855c75 - obs/TRMM_pr.50N50S.dc.AMJ.png: 84ea51f54dc03ca4ee7580bca4cd368305ea1ec9a2654424ea0a0eaebf365979 - obs/TRMM_pr.50N50S.dc.ANN.png: 669afa6931207326fc04817150f45c198715d29439e3cbbf4318e0a79b8c08a1 - obs/TRMM_pr.50N50S.dc.APR.png: 317989837dc5011222133530c6b42522d969aaeb4d73c2b7944052ebd74253ba - obs/TRMM_pr.50N50S.dc.ASO.png: 4aba05d02f8a5e681c5216a5962027ca9bfb39381866e47356e21607aa477f1b - obs/TRMM_pr.50N50S.dc.AUG.png: 865968f8a91b7ea15c91982a225ac99a16d6bbf2528769c8efd65e346fa122bf - obs/TRMM_pr.50N50S.dc.DEC.png: 720e1346f9066edc20b8f8519df5cb7cfe5822e34ce9001c7ff57c827c0a466e - obs/TRMM_pr.50N50S.dc.DJF.png: 46f5c0b218af0a3c537ec423851a7e6d2931cd3302faf86879ceb651d12c1378 - obs/TRMM_pr.50N50S.dc.FEB.png: 5d32c5b9c510dffeca32bcabe41c3133e9bdf3d2194d06ac41d0f9a606425e5e - obs/TRMM_pr.50N50S.dc.FMA.png: 2a23bed7e8c2abfd015753323d2713641c8c6581ca29cb50d7b13ee7105a6bdd - obs/TRMM_pr.50N50S.dc.JAN.png: d8692bbaa0b551a4ace1b1e4acfe0de39a984a38de9ef701d969f689ddbe965d - obs/TRMM_pr.50N50S.dc.JAS.png: 1b01f9ccf6d608b1b6664d01d99bb79a0900893ba652c49a74d584e9420c7c3d - obs/TRMM_pr.50N50S.dc.JFM.png: 1ed1c5ae2405a2fc2c6bd4f09c481741134755c464e32e6538802ca803dd4ed8 - obs/TRMM_pr.50N50S.dc.JJA.png: d8eff81c2f8a152317d2a53df878a62c3e031d30bc5a5d7ef3dbccc6ba1a7af9 - obs/TRMM_pr.50N50S.dc.JUL.png: ec621d6287352e0557b5efdb010b86e0ce98b8e03c59af4b5eaef487f0d5596a - obs/TRMM_pr.50N50S.dc.JUN.png: 8e00fa3c7d14beb7ad6ee22d495947505307f520e0522b13fc79aa0e0e9fe586 - obs/TRMM_pr.50N50S.dc.MAM.png: 7f92a9b1d2818cf61c8182e81bcedfb6c3c447c2193ecd5b463d039643f28cd7 - obs/TRMM_pr.50N50S.dc.MAR.png: 14cf4f9ea3b791a050ab610cb5ee5e5c2aeba3dac9464991031a09bb24cb5df3 - obs/TRMM_pr.50N50S.dc.MAY.png: a6b69d156e38c2e183328373e2263d906649b63f2ad0d10824f27c8d09e9650e - obs/TRMM_pr.50N50S.dc.MJJ.png: 4c1c21e7680cc4daa834503e7d5600e697ae551c0f5d1497c3212cd1c8394389 - obs/TRMM_pr.50N50S.dc.NDJ.png: 4718da4fd1b3dcaa2a11e6a2b22393c94ac3493f54da5135fcae8666a0d299a1 - obs/TRMM_pr.50N50S.dc.NOV.png: 2774b8f477edd4ffcd85583f38c2adefcaf418c179a51745932db356f0959a08 - obs/TRMM_pr.50N50S.dc.OCT.png: 52eb47695108f6458d9c886668590b734ffaea6b71d68bf3eb5dff7cec39c9ec - obs/TRMM_pr.50N50S.dc.OND.png: 68a5d03c9b1c489498ffd062a9a6449a0a02b5320e76f175dea137ede6efced9 - obs/TRMM_pr.50N50S.dc.SEP.png: b6629f261ba0700376f4cb7076427387b382ec64e6849fdf463402128fdc8a31 - obs/TRMM_pr.50N50S.dc.SON.png: 557c9cb08ffb153e6ec607e6c01d128e74ec1ecbbdc4d1241b9c191e597c7ce6 - obs/TRMM_pr.50N50S.dc.SUM.png: a87bcaa0d95c7f08553ed99eed70dd5069cd9a46d239ab9111445125f6c51962 - obs/TRMM_pr.50N50S.dc.WIN.png: 71ccd815e0d45e9b84f7d02e1a65b7cba40aa7a68ad680926257049220cb7ff1 - obs/TRMM_pr.USA.dc.AMJ.png: a09f62b3c4071e5d016bc5569b41cf6346d9c3e3190465376717e89dffba0541 - obs/TRMM_pr.USA.dc.ANN.png: 
a7b67421bce68773cd890484892dd9db5adcae9b89bf30d117797cf3cf0f0c7d - obs/TRMM_pr.USA.dc.APR.png: 09517c81b757e858412bc97695640c62007a4282bafbdca112c6bf3d1dd09500 - obs/TRMM_pr.USA.dc.ASO.png: 97c457ea64c56d1219e008a1804737cea351bec48562fe0e96dd25abc0872ee0 - obs/TRMM_pr.USA.dc.AUG.png: 86524b83a7cde9e3f8af7ad4e08e20206f3bb44c0a55dd1e8672f7f30e3dd02d - obs/TRMM_pr.USA.dc.DEC.png: 19bc176afea46b15c5150824466112e8defefc610f619f09f25176772d222b6b - obs/TRMM_pr.USA.dc.DJF.png: 62ed048300f5ad5074e6452295b4a02911e4c2292251b275ac3164faee9316a3 - obs/TRMM_pr.USA.dc.FEB.png: b38faf63d7c70d7dd8a23c39de2a20cab4f9ebeaa384e8a9b5ae1c0d36f7f75d - obs/TRMM_pr.USA.dc.FMA.png: a2573daad3e39eb31b418f531c213e4f9aab76e13ebff12c3464c13c1c1e3c86 - obs/TRMM_pr.USA.dc.JAN.png: de0eb6acdf7c0ed9f72574c8e44dc2763b2a247b4744e9fb2dc95ef0f22a3a3e - obs/TRMM_pr.USA.dc.JAS.png: e2b65f519064c6a4bc6ae965caa43db396c4b7ad21af955f9f47cc857515ea68 - obs/TRMM_pr.USA.dc.JFM.png: 79f0a6e61d71e389ecd4423456498c2f2005ae52edaf3b2605ac5c55314dfaea - obs/TRMM_pr.USA.dc.JJA.png: b579db0e3e887d167b0987207fec8f5cf8192411327d99c2e0c812059bc69ea6 - obs/TRMM_pr.USA.dc.JUL.png: 1a2914fe80cd6ac2f7ff7da01ce4fd51451689378b8427d280cccdb8f09a3281 - obs/TRMM_pr.USA.dc.JUN.png: e70f3da85d0e7c12f647fd959e6a14b107a5f5f68ba3e09042bdf63488364254 - obs/TRMM_pr.USA.dc.MAM.png: ea0ab303ae9224673a21f80aa22c3e55a47544366ba79ec181546d540bdec2f9 - obs/TRMM_pr.USA.dc.MAR.png: 32bcb295baab39538db65a7f0f04cccd319095a7ce7824d739d3cbcdad0ed341 - obs/TRMM_pr.USA.dc.MAY.png: 66d83b373934f75916410b1e7f1ac066d33961e004114f56e79b098bcccdd71d - obs/TRMM_pr.USA.dc.MJJ.png: e3da10538906d3d6ff0db1af45a7de6c9c425acf130254b90081223b51223e5b - obs/TRMM_pr.USA.dc.NDJ.png: 118e1d37bc22f58ceb224a358f905573f0114a95ee492f0d43e8beb4dd7abb27 - obs/TRMM_pr.USA.dc.NOV.png: 7873c4d886e582cfb0a39a4fc6b6b21ce51afba3f02bba636e8234241d1b1c50 - obs/TRMM_pr.USA.dc.OCT.png: 847881a5d055ca62d1e377f29590038b17742139fc4c34b11a1e14f9a80897db - obs/TRMM_pr.USA.dc.OND.png: d5493d4ace31f38caa32204df09aff99d9a6d49fa276f40374a2257430f02b16 - obs/TRMM_pr.USA.dc.SEP.png: 8bc7a5fc0852957649efa55656cd7fecf60295f3e9fee2e966fb82ef0c686246 - obs/TRMM_pr.USA.dc.SON.png: d306c186f0f44468d9fef64932f278f74722b356f283d9e1cac866eec4457dca - obs/TRMM_pr.USA.dc.SUM.png: 4b0490696b1353e8d2d2a89d82fa0135886c7165f9a61ea65d1371cca0901688 - obs/TRMM_pr.USA.dc.WIN.png: 4846b3ec46bb9e1f31cbc7d63f2b427957ec6e084484adae094cf807888a5dba - obs/netCDF/TRMM_DJF_adc.nc: b91204745087776f592a4b88ec6c4eb1 - obs/netCDF/TRMM_JJA_adc.nc: d2742e6c2177b785cfa5e521711da1bc - precip_diurnal_cycle.html: 4ff9739351d07994a128f33bfc4395d3 diff --git a/tests/make_file_checksums.py b/tests/make_file_checksums.py deleted file mode 100755 index 927b9395d..000000000 --- a/tests/make_file_checksums.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -import os -import sys -import argparse -import textwrap -import json -import shared_test_utils as shared - -def checksum_in_subtree_1(rootdir, reference_subdirs, exclude_exts=[]): - # descend into subdirectories, then flatten file hierarchy - checksum_dict = {} - for d1 in reference_subdirs: - p1 = os.path.join(rootdir, d1) - assert os.path.isdir(p1) - checksum_dict[d1] = shared.checksum_files_in_subtree(p1, exclude_exts) - return checksum_dict - -def make_output_data_dict(rootdir, case_list, exclude_exts=[]): - # same except we descend 2 levels deep and then flatten - checksum_dict = {} - for c in case_list: - d1 = c['dir'] - p1 = os.path.join(rootdir, d1) - assert os.path.isdir(p1) - 
checksum_dict[d1] = {} - for d2 in c['pod_list']: - p2 = os.path.join(rootdir, d1, d2) - assert os.path.isdir(p2) - checksum_dict[d1][d2] = shared.checksum_files_in_subtree(p2, exclude_exts) - return checksum_dict - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('config_file', nargs='?', type=str, - default='', help="Configuration file.") - args = parser.parse_args() - - header = """ - # This file was produced by make_file_checksums.py and is used by the - # test_*_checksums.py unit tests. Don't modify it by hand! - # - """ - - config = shared.get_configuration(args.config_file, check_input=True, check_output=True) - md5_path = config['paths']['md5_path'] - obs_path = config['paths']['OBS_ROOT_DIR'] - model_path = config['paths']['MODEL_ROOT_DIR'] - out_path = config['paths']['OUTPUT_DIR'] - - case_list = shared.get_test_data_configuration() - - print('Hashing input observational data') - checksum_dict = checksum_in_subtree_1(obs_path, case_list['pods']) - with open(os.path.join(md5_path, 'checksum_obs_data.json'), 'w') as file_obj: - file_obj.write(textwrap.dedent(header)) - json.dump(checksum_dict, file_obj) - - print('Hashing input model data') - checksum_dict = checksum_in_subtree_1(model_path, case_list['models']) - with open(os.path.join(md5_path, 'checksum_model_data.json'), 'w') as file_obj: - file_obj.write(textwrap.dedent(header)) - json.dump(checksum_dict, file_obj) - - print('Hashing output data') - checksum_dict = make_output_data_dict(out_path, case_list['case_list'], - ['.tar','.tar_old','.log','.json']) - with open(os.path.join(md5_path, 'checksum_output.json'), 'w') as file_obj: - file_obj.write(textwrap.dedent(header)) - json.dump(checksum_dict, file_obj) \ No newline at end of file diff --git a/tests/pod_test.py b/tests/pod_test.py deleted file mode 100644 index 0adf13269..000000000 --- a/tests/pod_test.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -import unittest -import test_input_checksums -import test_POD_execution -import test_output_checksums - -#if __name__ == '__main__': -os.environ['_MDTF_DATA_TEST'] = 'true' - -# TestSuite lets us control the order of test execution -loader = unittest.TestLoader() -full_suite = unittest.TestSuite() -full_suite.addTests(loader.loadTestsFromModule(test_input_checksums)) -full_suite.addTests(loader.loadTestsFromModule(test_POD_execution)) -full_suite.addTests(loader.loadTestsFromModule(test_output_checksums)) - -results = unittest.TextTestRunner().run(full_suite) -os.environ.pop('_MDTF_DATA_TEST') diff --git a/tests/pod_test_configs.yml b/tests/pod_test_configs.yml deleted file mode 100644 index 0384a3ef7..000000000 --- a/tests/pod_test_configs.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Specifications for models to use for convergence testing of individual PODs. 
-# -case_list: - - CASENAME: QBOi.EXP1.AMIP.001 - model: CESM - variable_convention: CESM - FIRSTYR: 1977 - LASTYR: 1981 - pod_list: - - Wheeler_Kiladis - - EOF_500hPa - - convective_transition_diag - - MJO_suite - - MJO_teleconnection - - precip_diurnal_cycle - - CASENAME: GFDL.CM4.c96L32.am4g10r8 - model: AM4 - variable_convention: AM4 - FIRSTYR: 1977 - LASTYR: 1981 - pod_list: - - MJO_prop_amp - - CASENAME: Lmon_GISS-E2-H_historical_r1i1p1 - model: CMIP - variable_convention: CMIP - FIRSTYR: 1951 - LASTYR: 2005 - pod_list: - - SM_ET_coupling diff --git a/tests/test_POD_execution.py b/tests/test_POD_execution.py deleted file mode 100644 index d02b63c74..000000000 --- a/tests/test_POD_execution.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -import sys -import unittest -import subprocess -from src.util import write_json -from src.tests import shared_test_utils as shared - -DOING_TRAVIS = (os.environ.get('TRAVIS', False) == 'true') -DOING_MDTF_DATA_TESTS = ('--data_tests' in sys.argv) -DOING_SETUP = DOING_MDTF_DATA_TESTS and not DOING_TRAVIS -# All this is a workaround because tests are programmatically generated at -# import time, but the tests are skipped at runtime. We're skipping tests -# because we're not in an environment where we have the data to set them up, -# so we just throw everything in an if-block to ensure they don't get generated -# if they're going to be skipped later. - -if DOING_SETUP: - config = shared.get_configuration('', check_input = True) - out_path = config['paths']['OUTPUT_DIR'] - - case_list = shared.get_test_data_configuration() - - # write temp configuration, one for each POD - temp_config = config.copy() - temp_config['pod_list'] = [] - temp_config['settings']['make_variab_tar'] = False - temp_config['settings']['test_mode'] = True - - pod_configs = shared.configure_pods(case_list, config_to_insert=temp_config) - for pod in case_list['pods']: - write_json(pod_configs[pod], os.path.join(out_path, pod+'_temp.json')) - - -# Python 3 has subTest; in 2.7 to avoid introducing other dependencies we use -# the advanced construction presented in https://stackoverflow.com/a/20870875 -# to programmatically generate tests - -class TestSequenceMeta(type): - def __new__(mcs, name, bases, test_dict): - def generate_test(pod_name): - def test(self): - temp_config_file = os.path.join(out_path, pod_name+'_temp.json') - self.assertEqual(0, subprocess.check_call( - ['python', 'src/mdtf.py', temp_config_file] - )) - # should do better cleanup here - return test - - if DOING_SETUP: - for pod in case_list['pods']: - test_name = "test_pod_" + pod - test_dict[test_name] = generate_test(pod) - return type.__new__(mcs, name, bases, test_dict) - -@unittest.skipIf(DOING_TRAVIS, - "Skipping POD execution tests because running in Travis CI environment") -@unittest.skipUnless(DOING_MDTF_DATA_TESTS, - "Skipping POD execution tests because not running data-intensive test suite.") -class TestPODExecution(unittest.TestCase): - __metaclass__ = TestSequenceMeta - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/tests/test_input_checksums.py b/tests/test_input_checksums.py deleted file mode 100755 index b6d9a0538..000000000 --- a/tests/test_input_checksums.py +++ /dev/null @@ -1,54 +0,0 @@ -import os -import sys -import unittest -from src.util import read_json -from src.tests import shared_test_utils as shared - -DOING_TRAVIS = (os.environ.get('TRAVIS', False) == 'true') -DOING_MDTF_DATA_TESTS = ('--data_tests' in sys.argv) -DOING_SETUP = DOING_MDTF_DATA_TESTS 
and not DOING_TRAVIS -# All this is a workaround because tests are programmatically generated at -# import time, but the tests are skipped at runtime. We're skipping tests -# because we're not in an environment where we have the data to set them up, -# so we just throw everything in an if-block to ensure they don't get generated -# if they're going to be skipped later. - -if DOING_SETUP: - config = shared.get_configuration('', check_input=True) - md5_path = config['paths']['md5_path'] - obs_path = config['paths']['OBS_DATA_ROOT'] - model_path = config['paths']['MODEL_DATA_ROOT'] - - case_list = shared.get_test_data_configuration() - - obs_data_checksums = read_json(os.path.join(md5_path, 'checksum_obs_data.json')) - model_data_checksums = read_json(os.path.join(md5_path, 'checksum_model_data.json')) - -# Python 3 has subTest; in 2.7 to avoid introducing other dependencies we use -# the advanced construction presented in https://stackoverflow.com/a/20870875 -# to programmatically generate tests - -class TestSequenceMeta(type): - def __new__(mcs, name, bases, test_dict): - if DOING_SETUP: - for pod in case_list['pods']: - test_name = "test_input_checksum_"+pod - test_dict[test_name] = shared.generate_checksum_test( - pod, obs_path, obs_data_checksums) - - for model in case_list['models']: - test_name = "test_input_checksum_"+model - test_dict[test_name] = shared.generate_checksum_test( - model, model_path, model_data_checksums) - return type.__new__(mcs, name, bases, test_dict) - -@unittest.skipIf(DOING_TRAVIS, - "Skipping input file md5 tests because running in Travis CI environment") -@unittest.skipUnless(DOING_MDTF_DATA_TESTS, - "Skipping input file md5 tests because not running data-intensive test suite.") -class TestInputChecksums(unittest.TestCase): - __metaclass__ = TestSequenceMeta - -if __name__ == '__main__': - unittest.main() - diff --git a/tests/test_output_checksums.py b/tests/test_output_checksums.py deleted file mode 100644 index b41610c0f..000000000 --- a/tests/test_output_checksums.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -import sys -import unittest -import subprocess -from src.util import read_json -from src.tests import shared_test_utils as shared - -DOING_TRAVIS = (os.environ.get('TRAVIS', False) == 'true') -DOING_MDTF_DATA_TESTS = ('--data_tests' in sys.argv) -DOING_SETUP = DOING_MDTF_DATA_TESTS and not DOING_TRAVIS -# All this is a workaround because tests are programmatically generated at -# import time, but the tests are skipped at runtime. We're skipping tests -# because we're not in an environment where we have the data to set them up, -# so we just throw everything in an if-block to ensure they don't get generated -# if they're going to be skipped later. 
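As the comments in these deleted tests note, the metaclass construction exists only because Python 2.7's unittest lacks subTest; under Python 3 the same per-POD reporting can be had without generating test methods at import time. A minimal sketch, with a hypothetical POD list and a stand-in assertion in place of the shared checksum helpers:

import unittest

class TestInputChecksumsPy3(unittest.TestCase):
    # Hypothetical POD list; the real one comes from get_test_data_configuration().
    PODS = ['Wheeler_Kiladis', 'EOF_500hPa']

    def test_input_checksums(self):
        for pod in self.PODS:
            # subTest reports each POD's failure separately, like the
            # generated test_input_checksum_<pod> methods in these files.
            with self.subTest(pod=pod):
                self.assertTrue(True)  # stand-in for the checksum comparison

if __name__ == '__main__':
    unittest.main()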
-
-if DOING_SETUP:
-    config = shared.get_configuration('', check_output=True)
-    md5_path = config['paths']['md5_path']
-    out_path = config['paths']['OUTPUT_DIR']
-
-    case_list = shared.get_test_data_configuration()
-
-    output_checksums = read_json(os.path.join(md5_path, 'checksum_output.json'))
-
-# Python 3 has subTest; in 2.7, to avoid introducing other dependencies, we use
-# the advanced construction presented in https://stackoverflow.com/a/20870875
-# to programmatically generate tests
-
-class PNGTestSequenceMeta(type):
-    def __new__(mcs, name, bases, test_dict):
-        if DOING_SETUP:
-            for case in case_list['case_list']:
-                case_path = os.path.join(out_path, case['dir'])
-                for pod in case['pod_list']:
-                    test_name = "test_output_png_md5_"+pod
-                    test_dict[test_name] = shared.generate_checksum_test(
-                        pod, case_path, output_checksums[case['dir']], ['.png'])
-        return type.__new__(mcs, name, bases, test_dict)
-
-@unittest.skipIf(DOING_TRAVIS,
-    "Skipping output file md5 tests because running in Travis CI environment")
-@unittest.skipUnless(DOING_MDTF_DATA_TESTS,
-    "Skipping output file md5 tests because not running data-intensive test suite.")
-class TestOutputPNGMD5(unittest.TestCase):
-    __metaclass__ = PNGTestSequenceMeta
-
-
-class NCTestSequenceMeta(type):
-    def __new__(mcs, name, bases, test_dict):
-        if DOING_SETUP:
-            for case in case_list['case_list']:
-                case_path = os.path.join(out_path, case['dir'])
-                for pod in case['pod_list']:
-                    test_name = "test_output_nc_md5_"+pod
-                    test_dict[test_name] = shared.generate_checksum_test(
-                        pod, case_path, output_checksums[case['dir']], ['.nc'])
-        return type.__new__(mcs, name, bases, test_dict)
-
-@unittest.expectedFailure # netcdfs won't be bitwise reproducible
-@unittest.skipIf(DOING_TRAVIS,
-    "Skipping output file md5 tests because running in Travis CI environment")
-@unittest.skipUnless(DOING_MDTF_DATA_TESTS,
-    "Skipping output file md5 tests because not running data-intensive test suite.")
-class TestOutputNCMD5(unittest.TestCase):
-    __metaclass__ = NCTestSequenceMeta
-
-if __name__ == '__main__':
-    unittest.main()
-
diff --git a/tests/travis_test.jsonc b/tests/travis_test.jsonc
deleted file mode 100644
index c76ecc2a7..000000000
--- a/tests/travis_test.jsonc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Configuration for MDTF-diagnostics driver script self-test.
-//
-// Copy this file and customize the settings as needed to run the framework on
-// your own model output without repeating command-line options. Pass it to the
-// framework at the end of the command line (positionally) or with the
-// -f/--input-file flag. Any other explicit command-line options will override
-// what's listed here.
-//
-// All text to the right of an unquoted "//" is a comment and ignored, as are
-// blank lines (JSONC quasi-standard).
-{
-  "case_list" : [
-    // The cases below correspond to the different sample model data sets. Note
-    // that the MDTF package does not currently support analyzing multiple
-    // models in a single invocation. Comment out or delete the first entry and
-    // uncomment the second to run NOAA-GFDL-AM4 only for the MJO_prop_amp POD,
-    // and likewise for the SM_ET_coupling POD.
-    {
-      "CASENAME" : "NCAR-CAM5.timeslice",
-      "model" : "NCAR-CAM5",
-      "convention" : "CMIP",
-      "FIRSTYR" : 2000,
-      "LASTYR" : 2003, // test date range is 2000-2004
-      "pod_list": [
-        // Optional: PODs to run for this model only (defaults to all)
-        "EOF_500hPa",
-        "example"
-      ]
-    }
-  ],
-  // PATHS ---------------------------------------------------------------------
-  // Location of supporting data downloaded when the framework was installed.
-
-  // If a relative path is given, it's resolved relative to the MDTF-diagnostics
-  // code directory. Environment variables (e.g., $HOME) can be referenced with a
-  // "$" and will be expanded to their current values when the framework runs.
-
-  // Parent directory containing observational data used by individual PODs.
-  "OBS_DATA_ROOT": "../travis_test/obs_data",
-
-  // Parent directory containing results from different models.
-  "MODEL_DATA_ROOT": "../travis_test/model/",
-
-  // Working directory. Defaults to OUTPUT_DIR if blank.
-  "WORKING_DIR": "../wkdir",
-
-  // Directory to write output. The results of each run of the framework will be
-  // put in a subdirectory of this directory.
-  "OUTPUT_DIR": "../wkdir",
-
-  // Location of the Anaconda/miniconda installation to use for managing
-  // dependencies (path returned by running `conda info --base`). If empty, the
-  // framework will attempt to determine the location of the system's conda
-  // installation.
-  "conda_root": "$HOME/miniconda",
-
-  // Directory containing the framework-specific conda environments. This should
-  // be equal to the "--env_dir" flag passed to conda_env_setup.sh. If left
-  // blank, the framework will look for its environments in the system default
-  // location.
-  "conda_env_root": "",
-
-  // SETTINGS ------------------------------------------------------------------
-  // Any command-line option recognized by the mdtf script (type `mdtf --help`)
-  // can be set here, in the form "flag name": "desired setting".
-
-  // Method used to fetch model data.
-  "data_manager": "Local_File",
-
-  // Method used to manage dependencies.
-  "environment_manager": "Conda",
-
-  // Method used to preprocess data.
-  "preprocessor": "SampleModelData",
-
-  // Settings affecting what output is generated:
-
-  // Set to true to have PODs save postscript figures in addition to bitmaps.
-  "save_ps": false,
-
-  // Set to true to have PODs save netCDF files of processed data.
-  "save_nc": false,
-
-  // Set to true to save HTML and bitmap plots in a .tar file.
-  "make_variab_tar": false,
-
-  // Set to true to overwrite results in OUTPUT_DIR; otherwise results are saved
-  // under a unique name.
-  "overwrite": false,
-
-  // Settings used in debugging:
-
-  // Log verbosity level.
-  "verbose": 1,
-
-  // Set to true for framework test. Data is fetched but PODs are not run.
-  "test_mode": false,
-
-  // Set to true for framework test. No external commands are run and no remote
-  // data is copied. Implies test_mode.
- "dry_run": false -} diff --git a/tools/catalog_builder/catalog_builder.py b/tools/catalog_builder/catalog_builder.py index 124a45934..6044adb93 100644 --- a/tools/catalog_builder/catalog_builder.py +++ b/tools/catalog_builder/catalog_builder.py @@ -143,10 +143,9 @@ def __init__(self): # in variables using intake-esm self.xarray_aggregations = [ {'type': 'union', 'attribute_name': 'variable_id'}, - { - 'type': 'join_existing', - 'attribute_name': 'time_range', - 'options': {'dim': 'time', 'coords': 'minimal', 'compat': 'override'} + {'type': 'join_existing', + 'attribute_name': 'time_range', + 'options': {'dim': 'time', 'coords': 'minimal', 'compat': 'override'} } ] self.data_format = "netcdf" # netcdf or zarr diff --git a/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.csv b/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.csv new file mode 100644 index 000000000..c43a028ef --- /dev/null +++ b/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.csv @@ -0,0 +1,155 @@ +activity_id,branch_method,branch_time_in_child,branch_time_in_parent,experiment,experiment_id,frequency,grid,grid_label,institution_id,nominal_resolution,parent_activity_id,parent_experiment_id,parent_source_id,parent_time_units,parent_variant_label,product,realm,source_id,source_type,sub_experiment,sub_experiment_id,table_id,variable_id,variant_label,member_id,standard_name,long_name,units,vertical_levels,init_year,start_time,end_time,time_range,path,version +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,6hr,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,6hrPlev,sfcWind,r1i1p1f1,r1i1p1f1,wind_speed,Near-Surface Wind Speed,m s-1,1,,1850-01-01 03:00:00,2014-12-31 21:00:00,1850-01-01 03:00:00-2014-12-31 21:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/6hrPlev/sfcWind/gr/v20200729/sfcWind_6hrPlev_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001010300-201412312100.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,6hrPt,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,6hrPlevPt,sfcWind,r1i1p1f1,r1i1p1f1,wind_speed,Near-Surface Wind Speed,m s-1,1,,1850-01-01 06:00:00,2010-01-01 00:00:00,1850-01-01 06:00:00-2010-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/6hrPlevPt/sfcWind/gr/v20200729/sfcWind_6hrPlevPt_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001010600-201001010000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,ch4,r1i1p1f1,r1i1p1f1,mole_fraction_of_methane_in_air,CH4 volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/ch4/gr/v20200729/ch4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER 
CHEM,none,none,AERmon,cheaqpso4,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_aqueous_phase_net_chemical_production,Aqueous-phase production rate of SO4,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/cheaqpso4/gr/v20200729/cheaqpso4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,co,r1i1p1f1,r1i1p1f1,mole_fraction_of_carbon_monoxide_in_air,CO volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/co/gr/v20200729/co_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,drybc,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_dry_deposition,dry deposition rate of black carbon aerosol mass,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/drybc/gr/v20200729/drybc_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,drynh3,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_ammonia_due_to_dry_deposition,dry deposition rate of nh3,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/drynh3/gr/v20200729/drynh3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,drynh4,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_dry_deposition,dry deposition rate of nh4,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/drynh4/gr/v20200729/drynh4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,drynoy,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_noy_expressed_as_nitrogen_due_to_dry_deposition,dry deposition rate of noy,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 
12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/drynoy/gr/v20200729/drynoy_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,dryso2,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_sulfur_dioxide_due_to_dry_deposition,dry deposition rate of so2,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/dryso2/gr/v20200729/dryso2_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,dryso4,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_dry_deposition,dry deposition rate of so4,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/dryso4/gr/v20200729/dryso4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emibc,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_emission,emission rate of black carbon aerosol mass,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emibc/gr/v20200729/emibc_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emico,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_carbon_monoxide_due_to_emission,total emission rate of co,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emico/gr/v20200729/emico_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emidms,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_dimethyl_sulfide_due_to_emission,total emission rate of dms,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emidms/gr/v20200729/emidms_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days 
since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emidust,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_dust_dry_aerosol_particles_due_to_emission,total emission rate of dust,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emidust/gr/v20200729/emidust_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emiisop,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_isoprene_due_to_emission,total emission rate of isoprene,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emiisop/gr/v20200729/emiisop_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emilnox,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_moles_of_nox_expressed_as_nitrogen,layer-integrated lightning production of NOx,mol s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emilnox/gr/v20200729/emilnox_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,eminh3,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_ammonia_due_to_emission,total emission rate of nh3,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/eminh3/gr/v20200729/eminh3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,eminox,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_nox_expressed_as_nitrogen_due_to_emission,total emission rate of nox,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/eminox/gr/v20200729/eminox_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emioa,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_particulate_organic_matter_dry_aerosol_particles_due_to_net_chemical_production_and_emission,primary emission and chemical production of dry aerosol organic matter,kg m-2 s-1,1,,1850-01-16 
12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emioa/gr/v20200729/emioa_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emiso2,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_sulfur_dioxide_due_to_emission,total emission rate of so2,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emiso2/gr/v20200729/emiso2_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,emiss,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mass_content_of_sea_salt_dry_aerosol_particles_due_to_emission,total emission rate of seasalt,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/emiss/gr/v20200729/emiss_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,isop,r1i1p1f1,r1i1p1f1,mole_fraction_of_isoprene_in_air,Isoprene volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/isop/gr/v20200729/isop_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,lossch4,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mole_concentration_of_methane_due_to_chemical_destruction,Monthly Loss of atmospheric Methane,mol m-3 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/lossch4/gr/v20200729/lossch4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,lossco,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mole_concentration_of_carbon_monoxide_due_to_chemical_destruction,Monthly Loss of atmospheric Carbon Monoxide,mol m-3 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/lossco/gr/v20200729/lossco_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 
km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,mmrbc,r1i1p1f1,r1i1p1f1,mass_fraction_of_elemental_carbon_dry_aerosol_particles_in_air,Elemental carbon mass mixing ratio,kg kg-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/mmrbc/gr/v20200729/mmrbc_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,mmrnh4,r1i1p1f1,r1i1p1f1,mass_fraction_of_ammonium_dry_aerosol_particles_in_air,NH4 mass mixing ratio,kg kg-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/mmrnh4/gr/v20200729/mmrnh4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,mmrno3,r1i1p1f1,r1i1p1f1,mass_fraction_of_nitrate_dry_aerosol_particles_in_air,NO3 aerosol mass mixing ratio,kg kg-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/mmrno3/gr/v20200729/mmrno3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,mmrso4,r1i1p1f1,r1i1p1f1,mass_fraction_of_sulfate_dry_aerosol_particles_in_air,Aerosol sulfate mass mixing ratio,kg kg-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/mmrso4/gr/v20200729/mmrso4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,no,r1i1p1f1,r1i1p1f1,mole_fraction_of_nitrogen_monoxide_in_air,NO volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/no/gr/v20200729/no_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,no2,r1i1p1f1,r1i1p1f1,mole_fraction_of_nitrogen_dioxide_in_air,NO2 volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/no2/gr/v20200729/no2_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 
+CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,o3,r1i1p1f1,r1i1p1f1,mole_fraction_of_ozone_in_air,Ozone volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/o3/gr/v20200729/o3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,o3loss,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mole_concentration_of_ozone_due_to_chemical_destruction,O3 destruction rate,mol m-3 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/o3loss/gr/v20200729/o3loss_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,o3prod,r1i1p1f1,r1i1p1f1,tendency_of_atmosphere_mole_concentration_of_ozone_due_to_chemical_production,O3 production rate,mol m-3 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/o3prod/gr/v20200729/o3prod_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,o3ste,r1i1p1f1,r1i1p1f1,mole_fraction_of_ozone_in_air,Stratospheric Ozone Tracer Volume Mixing Ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/o3ste/gr/v20200729/o3ste_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,od550bc,r1i1p1f1,r1i1p1f1,atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol,black carbon aod@550nm,1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/od550bc/gr/v20200729/od550bc_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,od550no3,r1i1p1f1,r1i1p1f1,atmosphere_optical_thickness_due_to_nitrate_ambient_aerosol_particles,nitrate aod@550nm,1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 
12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/od550no3/gr/v20200729/od550no3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,od550oa,r1i1p1f1,r1i1p1f1,atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles,total organic aerosol aod@550nm,1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/od550oa/gr/v20200729/od550oa_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,od550so4,r1i1p1f1,r1i1p1f1,atmosphere_optical_thickness_due_to_sulfate_ambient_aerosol_particles,sulfate aod@550nm,1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/od550so4/gr/v20200729/od550so4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,so2,r1i1p1f1,r1i1p1f1,mole_fraction_of_sulfur_dioxide_in_air,SO2 volume mixing ratio,mol mol-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/so2/gr/v20200729/so2_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetbc,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_elemental_carbon_dry_aerosol_particles_due_to_wet_deposition,wet deposition rate of black carbon aerosol mass,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetbc/gr/v20200729/wetbc_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetnh3,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_ammonia_due_to_wet_deposition,Wet Deposition Rate of NH3,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetnh3/gr/v20200729/wetnh3_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 
00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetnh4,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_ammonium_dry_aerosol_particles_due_to_wet_deposition,Wet Deposition Rate of NH4,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetnh4/gr/v20200729/wetnh4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetnoy,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_noy_expressed_as_nitrogen_due_to_wet_deposition,Wet Deposition Rate of NOy including Aerosol Nitrate,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetnoy/gr/v20200729/wetnoy_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetso2,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_sulfur_dioxide_due_to_wet_deposition,Wet Deposition Rate of SO2,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetso2/gr/v20200729/wetso2_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,aerosol,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,AERmon,wetso4,r1i1p1f1,r1i1p1f1,minus_tendency_of_atmosphere_mass_content_of_sulfate_dry_aerosol_particles_due_to_wet_deposition,Wet Deposition Rate of SO4,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/AERmon/wetso4/gr/v20200729/wetso4_AERmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,pr,r1i1p1f1,r1i1p1f1,precipitation_flux,Precipitation,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/pr/gr/v20200729/pr_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,prsn,r1i1p1f1,r1i1p1f1,snowfall_flux,Snowfall Flux,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 
12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/prsn/gr/v20200729/prsn_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,psl,r1i1p1f1,r1i1p1f1,air_pressure_at_mean_sea_level,Sea Level Pressure,Pa,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/psl/gr/v20200729/psl_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/rlut/gr/v20200729/rlut_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/rsdt/gr/v20200729/rsdt_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/rsut/gr/v20200729/rsut_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/rtmt/gr/v20200729/rtmt_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,sfcWind,r1i1p1f1,r1i1p1f1,wind_speed,Near-Surface Wind Speed,m s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 
12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/sfcWind/gr/v20200729/sfcWind_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Amon,tas,r1i1p1f1,r1i1p1f1,air_temperature,Near-Surface Air Temperature,K,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Amon/tas/gr/v20200729/tas_Amon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFday,ps,r1i1p1f1,r1i1p1f1,surface_air_pressure,Surface Air Pressure,Pa,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFday/ps/gr/v20200729/ps_CFday_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFday,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFday/rsdt/gr/v20200729/rsdt_CFday_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFday,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFday/rsut/gr/v20200729/rsut_CFday_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFmon,ta,r1i1p1f1,r1i1p1f1,air_temperature,Air Temperature,K,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFmon/ta/gr/v20200729/ta_CFmon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1850-01-01 01:00:00,1860-01-01 00:00:00,1850-01-01 01:00:00-1860-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18500101010000-18600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1860-01-01 01:00:00,1870-01-01 00:00:00,1860-01-01 01:00:00-1870-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18600101010000-18700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1870-01-01 01:00:00,1880-01-01 00:00:00,1870-01-01 01:00:00-1880-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18700101010000-18800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1880-01-01 01:00:00,1890-01-01 00:00:00,1880-01-01 01:00:00-1890-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18800101010000-18900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1890-01-01 01:00:00,1900-01-01 00:00:00,1890-01-01 01:00:00-1900-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18900101010000-19000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1900-01-01 01:00:00,1910-01-01 00:00:00,1900-01-01 01:00:00-1910-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19000101010000-19100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent 
past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1910-01-01 01:00:00,1920-01-01 00:00:00,1910-01-01 01:00:00-1920-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19100101010000-19200101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1920-01-01 01:00:00,1930-01-01 00:00:00,1920-01-01 01:00:00-1930-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19200101010000-19300101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1930-01-01 01:00:00,1940-01-01 00:00:00,1930-01-01 01:00:00-1940-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19300101010000-19400101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1940-01-01 01:00:00,1950-01-01 00:00:00,1940-01-01 01:00:00-1950-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19400101010000-19500101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1950-01-01 01:00:00,1960-01-01 00:00:00,1950-01-01 01:00:00-1960-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19500101010000-19600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER 
CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1960-01-01 01:00:00,1970-01-01 00:00:00,1960-01-01 01:00:00-1970-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19600101010000-19700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1970-01-01 01:00:00,1980-01-01 00:00:00,1970-01-01 01:00:00-1980-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19700101010000-19800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1980-01-01 01:00:00,1990-01-01 00:00:00,1980-01-01 01:00:00-1990-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19800101010000-19900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1990-01-01 01:00:00,2000-01-01 00:00:00,1990-01-01 01:00:00-2000-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19900101010000-20000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,2000-01-01 01:00:00,2010-01-01 00:00:00,2000-01-01 01:00:00-2010-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20000101010000-20100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,2010-01-01 01:00:00,2015-01-01 00:00:00,2010-01-01 01:00:00-2015-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rlut/gn/v20200729/rlut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20100101010000-20150101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1850-01-01 01:00:00,1860-01-01 00:00:00,1850-01-01 01:00:00-1860-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18500101010000-18600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1860-01-01 01:00:00,1870-01-01 00:00:00,1860-01-01 01:00:00-1870-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18600101010000-18700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1870-01-01 01:00:00,1880-01-01 00:00:00,1870-01-01 01:00:00-1880-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18700101010000-18800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1880-01-01 01:00:00,1890-01-01 00:00:00,1880-01-01 01:00:00-1890-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18800101010000-18900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1890-01-01 01:00:00,1900-01-01 00:00:00,1890-01-01 01:00:00-1900-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18900101010000-19000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent 
past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1900-01-01 01:00:00,1910-01-01 00:00:00,1900-01-01 01:00:00-1910-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19000101010000-19100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1910-01-01 01:00:00,1920-01-01 00:00:00,1910-01-01 01:00:00-1920-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19100101010000-19200101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1920-01-01 01:00:00,1930-01-01 00:00:00,1920-01-01 01:00:00-1930-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19200101010000-19300101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1930-01-01 01:00:00,1940-01-01 00:00:00,1930-01-01 01:00:00-1940-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19300101010000-19400101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1940-01-01 01:00:00,1950-01-01 00:00:00,1940-01-01 01:00:00-1950-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19400101010000-19500101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER 
CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1950-01-01 01:00:00,1960-01-01 00:00:00,1950-01-01 01:00:00-1960-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19500101010000-19600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1960-01-01 01:00:00,1970-01-01 00:00:00,1960-01-01 01:00:00-1970-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19600101010000-19700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1970-01-01 01:00:00,1980-01-01 00:00:00,1970-01-01 01:00:00-1980-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19700101010000-19800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1980-01-01 01:00:00,1990-01-01 00:00:00,1980-01-01 01:00:00-1990-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19800101010000-19900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,1990-01-01 01:00:00,2000-01-01 00:00:00,1990-01-01 01:00:00-2000-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19900101010000-20000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,2000-01-01 01:00:00,2010-01-01 00:00:00,2000-01-01 01:00:00-2010-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20000101010000-20100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsdt,r1i1p1f1,r1i1p1f1,toa_incoming_shortwave_flux,TOA Incident Shortwave Radiation,W m-2,1,,2010-01-01 01:00:00,2015-01-01 00:00:00,2010-01-01 01:00:00-2015-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsdt/gn/v20200729/rsdt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20100101010000-20150101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1850-01-01 01:00:00,1860-01-01 00:00:00,1850-01-01 01:00:00-1860-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18500101010000-18600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1860-01-01 01:00:00,1870-01-01 00:00:00,1860-01-01 01:00:00-1870-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18600101010000-18700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1870-01-01 01:00:00,1880-01-01 00:00:00,1870-01-01 01:00:00-1880-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18700101010000-18800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1880-01-01 01:00:00,1890-01-01 00:00:00,1880-01-01 01:00:00-1890-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18800101010000-18900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent 
past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1890-01-01 01:00:00,1900-01-01 00:00:00,1890-01-01 01:00:00-1900-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18900101010000-19000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1900-01-01 01:00:00,1910-01-01 00:00:00,1900-01-01 01:00:00-1910-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19000101010000-19100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1910-01-01 01:00:00,1920-01-01 00:00:00,1910-01-01 01:00:00-1920-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19100101010000-19200101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1920-01-01 01:00:00,1930-01-01 00:00:00,1920-01-01 01:00:00-1930-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19200101010000-19300101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1930-01-01 01:00:00,1940-01-01 00:00:00,1930-01-01 01:00:00-1940-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19300101010000-19400101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER 
CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1940-01-01 01:00:00,1950-01-01 00:00:00,1940-01-01 01:00:00-1950-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19400101010000-19500101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1950-01-01 01:00:00,1960-01-01 00:00:00,1950-01-01 01:00:00-1960-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19500101010000-19600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1960-01-01 01:00:00,1970-01-01 00:00:00,1960-01-01 01:00:00-1970-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19600101010000-19700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1970-01-01 01:00:00,1980-01-01 00:00:00,1970-01-01 01:00:00-1980-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19700101010000-19800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1980-01-01 01:00:00,1990-01-01 00:00:00,1980-01-01 01:00:00-1990-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19800101010000-19900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,1990-01-01 01:00:00,2000-01-01 00:00:00,1990-01-01 01:00:00-2000-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19900101010000-20000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,2000-01-01 01:00:00,2010-01-01 00:00:00,2000-01-01 01:00:00-2010-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20000101010000-20100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rsut,r1i1p1f1,r1i1p1f1,toa_outgoing_shortwave_flux,TOA Outgoing Shortwave Radiation,W m-2,1,,2010-01-01 01:00:00,2015-01-01 00:00:00,2010-01-01 01:00:00-2015-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rsut/gn/v20200729/rsut_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20100101010000-20150101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1850-01-01 01:00:00,1860-01-01 00:00:00,1850-01-01 01:00:00-1860-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18500101010000-18600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1860-01-01 01:00:00,1870-01-01 00:00:00,1860-01-01 01:00:00-1870-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18600101010000-18700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1870-01-01 01:00:00,1880-01-01 00:00:00,1870-01-01 01:00:00-1880-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18700101010000-18800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1880-01-01 01:00:00,1890-01-01 00:00:00,1880-01-01 01:00:00-1890-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18800101010000-18900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1890-01-01 01:00:00,1900-01-01 00:00:00,1890-01-01 01:00:00-1900-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_18900101010000-19000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1900-01-01 01:00:00,1910-01-01 00:00:00,1900-01-01 01:00:00-1910-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19000101010000-19100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1910-01-01 01:00:00,1920-01-01 00:00:00,1910-01-01 01:00:00-1920-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19100101010000-19200101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1920-01-01 01:00:00,1930-01-01 00:00:00,1920-01-01 01:00:00-1930-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19200101010000-19300101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1930-01-01 01:00:00,1940-01-01 00:00:00,1930-01-01 01:00:00-1940-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19300101010000-19400101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1940-01-01 01:00:00,1950-01-01 00:00:00,1940-01-01 01:00:00-1950-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19400101010000-19500101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1950-01-01 01:00:00,1960-01-01 00:00:00,1950-01-01 01:00:00-1960-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19500101010000-19600101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1960-01-01 01:00:00,1970-01-01 00:00:00,1960-01-01 01:00:00-1970-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19600101010000-19700101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1970-01-01 01:00:00,1980-01-01 00:00:00,1970-01-01 01:00:00-1980-01-01 
00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19700101010000-19800101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1980-01-01 01:00:00,1990-01-01 00:00:00,1980-01-01 01:00:00-1990-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19800101010000-19900101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,1990-01-01 01:00:00,2000-01-01 00:00:00,1990-01-01 01:00:00-2000-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_19900101010000-20000101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,2000-01-01 01:00:00,2010-01-01 00:00:00,2000-01-01 01:00:00-2010-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20000101010000-20100101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,subhrPt,data sampled in model native grid by nearest neighbour method,gn,IPSL,100 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,CFsubhr,rtmt,r1i1p1f1,r1i1p1f1,net_downward_radiative_flux_at_top_of_atmosphere_model,Net Downward Flux at Top of Model,W m-2,1,,2010-01-01 01:00:00,2015-01-01 00:00:00,2010-01-01 01:00:00-2015-01-01 00:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/CFsubhr/rtmt/gn/v20200729/rtmt_CFsubhr_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_20100101010000-20150101000000.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Eday,ta850,r1i1p1f1,r1i1p1f1,air_temperature,Air Temperature,K,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Eday/ta850/gr/v20200729/ta850_Eday_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent 
past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Eday,zmla,r1i1p1f1,r1i1p1f1,atmosphere_boundary_layer_thickness,Height of Boundary Layer,m,1,,1995-01-01 12:00:00,2014-12-31 12:00:00,1995-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Eday/zmla/gr/v20200729/zmla_Eday_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_19950101-20141231.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,chl,r1i1p1f1,r1i1p1f1,mass_concentration_of_phytoplankton_expressed_as_chlorophyll_in_sea_water,Mass Concentration of Total Phytoplankton expressed as Chlorophyll in sea water,kg m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/chl/gn/v20200729/chl_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,dfe,r1i1p1f1,r1i1p1f1,mole_concentration_of_dissolved_iron_in_sea_water,Dissolved Iron Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/dfe/gn/v20200729/dfe_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,dpco2,r1i1p1f1,r1i1p1f1,surface_carbon_dioxide_partial_pressure_difference_between_sea_water_and_air,Delta PCO2,Pa,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/dpco2/gn/v20200729/dpco2_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,epc100,r1i1p1f1,r1i1p1f1,sinking_mole_flux_of_particulate_organic_matter_expressed_as_carbon_in_sea_water,Downward Flux of Particulate Organic Carbon,mol m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/epc100/gn/v20200729/epc100_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER 
CHEM,none,none,Omon,fgco2,r1i1p1f1,r1i1p1f1,surface_downward_mass_flux_of_carbon_dioxide_expressed_as_carbon,Surface Downward Flux of Total CO2,kg m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/fgco2/gn/v20200729/fgco2_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,intpp,r1i1p1f1,r1i1p1f1,net_primary_mole_productivity_of_biomass_expressed_as_carbon_by_phytoplankton,Primary Organic Carbon Production by All Types of Phytoplankton,mol m-2 s-1,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/intpp/gn/v20200729/intpp_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,mlotst,r1i1p1f1,r1i1p1f1,ocean_mixed_layer_thickness_defined_by_sigma_t,Ocean Mixed Layer Thickness Defined by Sigma T,m,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/mlotst/gn/v20200729/mlotst_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,no3,r1i1p1f1,r1i1p1f1,mole_concentration_of_nitrate_in_sea_water,Dissolved Nitrate Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/no3/gn/v20200729/no3_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,o2,r1i1p1f1,r1i1p1f1,mole_concentration_of_dissolved_molecular_oxygen_in_sea_water,Dissolved Oxygen Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/o2/gn/v20200729/o2_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,o2min,r1i1p1f1,r1i1p1f1,mole_concentration_of_dissolved_molecular_oxygen_in_sea_water_at_shallowest_local_minimum_in_vertical_profile,Oxygen Minimum Concentration,mol m-3,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 
12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/o2min/gn/v20200729/o2min_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,phyc,r1i1p1f1,r1i1p1f1,mole_concentration_of_phytoplankton_expressed_as_carbon_in_sea_water,Phytoplankton Carbon Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/phyc/gn/v20200729/phyc_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,po4,r1i1p1f1,r1i1p1f1,mole_concentration_of_dissolved_inorganic_phosphorus_in_sea_water,Total Dissolved Inorganic Phosphorus Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/po4/gn/v20200729/po4_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,rsntds,r1i1p1f1,r1i1p1f1,net_downward_shortwave_flux_at_sea_water_surface,Net Downward Shortwave Radiation at Sea Water Surface,W m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/rsntds/gn/v20200729/rsntds_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,si,r1i1p1f1,r1i1p1f1,mole_concentration_of_dissolved_inorganic_silicon_in_sea_water,Total Dissolved Inorganic Silicon Concentration,mol m-3,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/si/gn/v20200729/si_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,so,r1i1p1f1,r1i1p1f1,sea_water_salinity,Sea Water Salinity,0.001,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/so/gn/v20200729/so_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar 
primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,tauuo,r1i1p1f1,r1i1p1f1,surface_downward_x_stress,Surface Downward X Stress,N m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/tauuo/gn/v20200729/tauuo_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,tauvo,r1i1p1f1,r1i1p1f1,surface_downward_y_stress,Surface Downward Y Stress,N m-2,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/tauvo/gn/v20200729/tauvo_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,thetao,r1i1p1f1,r1i1p1f1,sea_water_potential_temperature,Sea Water Potential Temperature,degC,31,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/thetao/gn/v20200729/thetao_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocean,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,tos,r1i1p1f1,r1i1p1f1,sea_surface_temperature,Sea Surface Temperature,degC,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/tos/gn/v20200729/tos_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,ocnBgchem,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,Omon,zo2min,r1i1p1f1,r1i1p1f1,depth_at_shallowest_local_minimum_in_vertical_profile_of_mole_concentration_of_dissolved_molecular_oxygen_in_sea_water,Depth of Oxygen Minimum Concentration,m,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/Omon/zo2min/gn/v20200729/zo2min_Omon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729 +CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,mon,native ocean ORCA2 tripolar primarily 2deg grid,gn,IPSL,250 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,seaIce,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,SImon,siconc,r1i1p1f1,r1i1p1f1,sea_ice_area_fraction,Sea Ice Area Fraction (Ocean Grid),%,1,,1850-01-16 12:00:00,2014-12-16 12:00:00,1850-01-16 12:00:00-2014-12-16 
12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/SImon/siconc/gn/v20200729/siconc_SImon_IPSL-CM5A2-INCA_historical_r1i1p1f1_gn_185001-201412.nc,v20200729
+CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,day,hurs,r1i1p1f1,r1i1p1f1,relative_humidity,Near-Surface Relative Humidity,%,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/day/hurs/gr/v20200729/hurs_day_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729
+CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,day,rlut,r1i1p1f1,r1i1p1f1,toa_outgoing_longwave_flux,TOA Outgoing Longwave Radiation,W m-2,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/day/rlut/gr/v20200729/rlut_day_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729
+CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,day,sfcWind,r1i1p1f1,r1i1p1f1,wind_speed,Daily-Mean Near-Surface Wind Speed,m s-1,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/day/sfcWind/gr/v20200729/sfcWind_day_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729
+CMIP,standard,0.0,7305.0,all-forcing simulation of the recent past,historical,day,LMDZ grid,gr,IPSL,500 km,CMIP,piControl,IPSL-CM5A2-INCA,days since 1850-01-01 00:00:00,r1i1p1f1,model-output,atmos,IPSL-CM5A2-INCA,AOGCM BGC AER CHEM,none,none,day,tas,r1i1p1f1,r1i1p1f1,air_temperature,Near-Surface Air Temperature,K,1,,1850-01-01 12:00:00,2014-12-31 12:00:00,1850-01-01 12:00:00-2014-12-31 12:00:00,/uda/cmip6/CMIP/IPSL/IPSL-CM5A2-INCA/historical/r1i1p1f1/day/tas/gr/v20200729/tas_day_IPSL-CM5A2-INCA_historical_r1i1p1f1_gr_18500101-20141231.nc,v20200729
diff --git a/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.json b/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.json
new file mode 100644
index 000000000..0bf72d497
--- /dev/null
+++ b/tools/catalog_builder/examples/cmip/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.json
@@ -0,0 +1,191 @@
+{
+  "esmcat_version": "0.0.1",
+  "attributes": [
+    {
+      "column_name": "activity_id",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "branch_method",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "branch_time_in_child",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "branch_time_in_parent",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "experiment",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "experiment_id",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "frequency",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "grid",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "grid_label",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "institution_id",
+      "vocabulary": ""
+    },
+    {
+      "column_name": "nominal_resolution",
+      "vocabulary": ""
+    },
+    {
"column_name": "parent_activity_id", + "vocabulary": "" + }, + { + "column_name": "parent_experiment_id", + "vocabulary": "" + }, + { + "column_name": "parent_source_id", + "vocabulary": "" + }, + { + "column_name": "parent_time_units", + "vocabulary": "" + }, + { + "column_name": "parent_variant_label", + "vocabulary": "" + }, + { + "column_name": "product", + "vocabulary": "" + }, + { + "column_name": "realm", + "vocabulary": "" + }, + { + "column_name": "source_id", + "vocabulary": "" + }, + { + "column_name": "source_type", + "vocabulary": "" + }, + { + "column_name": "sub_experiment", + "vocabulary": "" + }, + { + "column_name": "sub_experiment_id", + "vocabulary": "" + }, + { + "column_name": "table_id", + "vocabulary": "" + }, + { + "column_name": "variable_id", + "vocabulary": "" + }, + { + "column_name": "variant_label", + "vocabulary": "" + }, + { + "column_name": "member_id", + "vocabulary": "" + }, + { + "column_name": "standard_name", + "vocabulary": "" + }, + { + "column_name": "long_name", + "vocabulary": "" + }, + { + "column_name": "units", + "vocabulary": "" + }, + { + "column_name": "vertical_levels", + "vocabulary": "" + }, + { + "column_name": "init_year", + "vocabulary": "" + }, + { + "column_name": "start_time", + "vocabulary": "" + }, + { + "column_name": "end_time", + "vocabulary": "" + }, + { + "column_name": "time_range", + "vocabulary": "" + }, + { + "column_name": "path", + "vocabulary": "" + }, + { + "column_name": "version", + "vocabulary": "" + } + ], + "assets": { + "column_name": "path", + "format": "netcdf", + "format_column_name": null + }, + "aggregation_control": { + "variable_column_name": "variable_id", + "groupby_attrs": [ + "activity_id", + "institution_id", + "source_id", + "experiment_id", + "frequency", + "member_id", + "table_id", + "grid_label", + "realm", + "variant_label", + "time_range" + ], + "aggregations": [ + { + "type": "union", + "attribute_name": "variable_id", + "options": {} + }, + { + "type": "join_existing", + "attribute_name": "time_range", + "options": { + "dim": "time", + "coords": "minimal", + "compat": "override" + } + } + ] + }, + "id": "esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1", + "description": null, + "title": null, + "last_updated": "2023-04-28T20:18:34Z", + "catalog_file": "file:///nbhome/jml/esm_catalog_IPSL-CM5A2-INCA_historical_r1i1p1f1.csv" +} \ No newline at end of file diff --git a/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json b/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json index 7c72c852a..9217106fd 100644 --- a/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json +++ b/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.json @@ -111,5 +111,5 @@ "description": null, "title": null, "last_updated": "2023-05-07T16:35:52Z", - "catalog_file": "file:///nbhome/jml/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.csv" + "catalog_file": "file:////local2/home/mdtf/MDTF-diagnostics/tools/catalog_builder/examples/gfdl/esm_catalog_DECK_ESM4_historical_D1_atmos_ts_monthly_5yr_subset.csv" } \ No newline at end of file diff --git a/tools/catalog_builder/examples/templates/catalog_builder_slurm.sh b/tools/catalog_builder/examples/templates/catalog_builder_slurm.sh index 66a459ca5..879f5856d 100755 --- a/tools/catalog_builder/examples/templates/catalog_builder_slurm.sh +++ 
+++ b/tools/catalog_builder/examples/templates/catalog_builder_slurm.sh
@@ -4,14 +4,14 @@
# > cd /nbhome/[USERNAME]/mdtf/MDTF-diagnostics/tools/catalog_builder
# > sbatch examples/templates/catalog_builder_slurm.sh --config examples/templates/example_builder_config.yml
#SBATCH --job-name=esm_cat_builder
-#SBATCH --chdir=/nbhome/[USERNAME]
-#SBATCH --output=/nbhome/[USERNAME]/logs/slurm_%x.%A_%a.out
-#SBATCH --error=/nbhome/[USERNAME]/logs/slurm_%x.%A_%a.err
+#SBATCH --chdir=/nbhome/jml
+#SBATCH --output=/nbhome/jml/logs/slurm_%x.%A_%a.out
+#SBATCH --error=/nbhome/jml/logs/slurm_%x.%A_%a.err
#SBATCH --time=1:00:00
#SBATCH --ntasks=8
#SBATCH --constraint=bigmem
#
-local_repo="/nbhome/[USERNAME]/mdtf/MDTF-diagnostics/tools/catalog_builder"
+local_repo="/nbhome/jml/catalog_builder"
_mdtf="/home/oar.gfdl.mdtf/mdtf/MDTF-diagnostics"
source "${_mdtf}/src/conda/conda_init.sh" -q "/home/oar.gfdl.mdtf/miniconda3"
conda activate _MDTF_base
diff --git a/user_scripts/__init__.py b/user_scripts/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/user_scripts/example_pp_script.py b/user_scripts/example_pp_script.py
new file mode 100755
index 000000000..93e41a0b2
--- /dev/null
+++ b/user_scripts/example_pp_script.py
@@ -0,0 +1,207 @@
+"""Example custom preprocessing script to include in framework workflow"""
+import os
+import sys
+import io
+import collections
+import numpy as np
+import pandas as pd
+import xarray as xr
+import logging
+from src.util import datelabel
+from src.util import NameSpace
+import src.util.json_utils
+
+# Define a log object for debugging
+_log = logging.getLogger(__name__)
+
+# check_group_daterange is a helper function used by the preprocessor
+# and included in this example custom preprocessing script for testing
+
+
+def check_group_daterange(group_df: pd.DataFrame) -> pd.DataFrame:
+    """Sort the files found for each experiment by date, verify that
+    the date ranges contained in the files are contiguous in time and that
+    the date range of the files spans the query date range.
diff --git a/user_scripts/__init__.py b/user_scripts/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/user_scripts/example_pp_script.py b/user_scripts/example_pp_script.py
new file mode 100755
index 000000000..93e41a0b2
--- /dev/null
+++ b/user_scripts/example_pp_script.py
@@ -0,0 +1,207 @@
+"""Example custom preprocessing script to include in framework workflow"""
+import os
+import sys
+import io
+import collections
+import numpy as np
+import pandas as pd
+import xarray as xr
+import logging
+from src.util import datelabel
+from src.util import NameSpace
+import src.util.json_utils
+
+# Define a log object for debugging
+_log = logging.getLogger(__name__)
+
+# check_group_daterange is a helper function used by the preprocessor
+# and included in this example custom preprocessing script for testing
+
+
+def check_group_daterange(group_df: pd.DataFrame) -> pd.DataFrame:
+    """Sort the files found for each experiment by date and verify that
+    the date ranges contained in the files are contiguous in time and that
+    the date range of the files spans the query date range.
+
+    Args:
+        group_df (pd.DataFrame): catalog rows for one experiment, with
+            'start_time', 'end_time', and 'path' columns
+    """
+    date_col = "date_range"
+    try:
+        # DateRange construction throws ValueError for malformed ranges
+        dates_df = group_df.loc[:, ['start_time', 'end_time']]
+        date_range_vals = []
+        for idx in range(len(dates_df)):
+            st = dates_df['start_time'].iloc[idx]
+            en = dates_df['end_time'].iloc[idx]
+            date_range_vals.append(datelabel.DateRange(st, en))
+
+        group_df = group_df.assign(date_range=date_range_vals)
+        sorted_df = group_df.sort_values(by=date_col)
+
+        # note: this trimmed-down copy does not verify that the sorted
+        # files span the query date range
+        return sorted_df
+    except ValueError:
+        _log.error("Non-contiguous or malformed date range in files: %s",
+                   group_df["path"].values)
+        return pd.DataFrame(columns=group_df.columns)
+
+# rename_dataset_keys is a helper function used by the preprocessor
+# and included in this example custom preprocessing script for testing
+
+
+def rename_dataset_keys(ds: dict, case_list: dict) -> collections.OrderedDict:
+    """Rename dataset keys output by ESM intake catalog query to case names."""
+
+    def rename_key(old_dict: dict, new_dict: collections.OrderedDict, old_key, new_key):
+        """Credit: https://stackoverflow.com/questions/16475384/rename-a-dictionary-key"""
+        new_dict[new_key] = old_dict[old_key]
+
+    new_dict = collections.OrderedDict()
+    case_names = [c for c in case_list.keys()]
+    for old_key, case_d in ds.items():
+        filename = os.path.basename(case_d.attrs['intake_esm_attrs:path'])
+        rename_key(ds, new_dict, old_key, [c for c in case_names if c in filename][0])
+    return new_dict
+
+
+# unit test for basic functionality
+def test_example_script() -> str:
+    test_str = "Testing call to example_pp_script"
+    print(test_str)
+    return test_str
+
+
+# Main routine that works on the xarray dataset that the framework reads from the input data catalog.
+# It mirrors the preprocessor functions that operate separately on each variable in every case in an
+# xarray dataset:
+# for case_name, case_xr_dataset in cat_subset.items():
+#     for v in case_list[case_name].varlist.keys():
+#         ...
+# Functions adapted from albedofb_calcs.py
+
+
+def main(xr_ds: xr.Dataset, var: str) -> xr.Dataset:
+    # 1. Reshape the data array to convert dimensions to sub-dimensions defined by "coords" and
+    # "new_dims"; assumes daily data on a noleap (365-day) calendar
+    ny = int(xr_ds['time'].sizes['time'] / 365)
+    coords = [np.arange(ny), np.arange(365)]
+    new_dims = ['year', 'day']
+
+    # Create a pandas MultiIndex
+    ind = pd.MultiIndex.from_product(coords, names=new_dims)
+
+    # get the variable data array
+    xr_dupe = xr_ds[var].copy()
+
+    # Replace the time index in the DataArray by this new index
+    xr_dupe.coords['time'] = ind
+
+    # Convert multi-index to individual dims using DataArray.unstack().
+    # This changes dimension order! The new dimensions are at the end.
+    xr_dupe = xr_dupe.unstack('time')
+
+    # Permute to restore the original dimension order
+    i = xr_ds[var].dims.index('time')
+    dims = list(xr_dupe.dims)
+
+    # insert the new dimension names where 'time' used to be
+    for d in new_dims[::-1]:
+        dims.insert(i, d)
+
+    for d in new_dims:
+        _ = dims.pop(-1)
+
+    xr_dupe = xr_dupe.transpose(*dims)
+
+    # 2. compute the annual mean for each day
+    return xr_dupe.mean(dim='year')
+
+
+# Anything in this block executes if the script is run on its own:
+# > python3 example_pp_script.py
+# The following code reads a data catalog subset into an xarray dataset dict
+# and computes a time average on reshaped arrays of air temperature (tas)
+# for each case defined in the input configuration file
+
+
+if __name__ == '__main__':
+    import intake
+
+    # root directory of this script
+    code_root = os.path.dirname(os.path.realpath(__file__))
+
+    # full path to the runtime configuration file
+    # config_file = "[path to configuration file]/runtime_config.jsonc"
+    config_file = "/Users/jess/mdtf/MDTF-diagnostics/templates/runtime_config.jsonc"
+
+    # read the contents of the configuration file into a NameSpace (basically a dict with dot notation)
+    with io.open(config_file, 'r', encoding='utf-8') as file_:
+        str_ = file_.read()
+    json_config = src.util.json_utils.parse_json(str_)
+    config = NameSpace.fromDict({k: json_config[k] for k in json_config.keys()})
+
+    # full path to the input data catalog json file
+    # data_catalog = "[path_to_catalog]/[catalog_name].json"
+    data_catalog = config.DATA_CATALOG
+
+    # open the csv file using information provided by the catalog definition file
+    cat = intake.open_esm_datastore(data_catalog)
+
+    # dictionary to hold the data subset returned by the catalog query
+    cat_dict = {}
+
+    # create filter lists for POD variables
+    cols = list(cat.df.columns.values)
+
+    # Add a date_range column to the catalog dictionary if necessary
+    if 'date_range' not in [c.lower() for c in cols]:
+        cols.append('date_range')
+
+    # define a variable dictionary with the name, standard_name, realm, output frequency, and any
+    # other attributes you want to use in the catalog query;
+    # note that this example uses daily data
+    var_list = {
+        "tas": {
+            "standard_name": "air_temperature",
+            "freq": "day",
+            "realm": "atmos"
+        }
+    }
+
+    # loop through the case list and read in the desired files
+    for case_name, case_d in config.case_list.items():
+        # use a regex wildcard to match any path containing the case name
+        path_regex = case_name + '.*'
+
+        # loop through the variables in the dictionary
+        for k, v in var_list.items():
+            cat_subset = cat.search(activity_id=case_d.convention,
+                                    standard_name=v['standard_name'],
+                                    frequency=v['freq'],
+                                    realm=v['realm'],
+                                    path=path_regex
+                                    )
+            if cat_subset.df.empty:
+                _log.error(f"No assets found for {case_name} in {data_catalog}")
+
+            # Get files in specified date range
+            cat_subset.esmcat._df = check_group_daterange(cat_subset.df)
+
+            # convert the subset catalog to an xarray dataset dict
+            # and merge the result into the final dict
+            cat_dict = cat_dict | cat_subset.to_dataset_dict(
+                progressbar=False,
+                xarray_open_kwargs={"decode_times": True,
+                                    "use_cftime": True
+                                    }
+            )
+
+    # rename cat_subset case dict keys to case names
+    new_cat = rename_dataset_keys(cat_dict, config.case_list)
+
+    # run the main routine on each variable in each case's dataset, collecting
+    # the results in a dict instead of rebinding the loop variable (rebinding
+    # would break runs with more than one variable in var_list)
+    results = {}
+    for case_name, case_xr_dataset in new_cat.items():
+        for var_name in var_list.keys():
+            results[(case_name, var_name)] = main(case_xr_dataset, var_name)
+    sys.exit(0)
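Finally, a self-contained sketch of the (year, day) reshape that main() applies, runnable outside the framework on synthetic data. The 365-day noleap calendar is an assumption carried over from the script; the array shapes here are made up.

# Standalone sketch of the MultiIndex reshape used in main(); synthetic data.
import numpy as np
import pandas as pd
import xarray as xr

ny, nlat = 3, 4
da = xr.DataArray(
    np.random.rand(ny * 365, nlat),
    dims=["time", "lat"],
    name="tas",
)
ind = pd.MultiIndex.from_product(
    [np.arange(ny), np.arange(365)], names=["year", "day"]
)
# newer xarray versions may require
# xr.Coordinates.from_pandas_multiindex(ind, "time") instead of direct assignment
da.coords["time"] = ind
da = da.unstack("time")      # dims now ("lat", "year", "day")
clim = da.mean(dim="year")   # day-of-year climatology, dims ("lat", "day")
print(clim.sizes)            # Frozen({'lat': 4, 'day': 365})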