Merge pull request #995 from ekatef/add_demand_parser
Add csv-demand parser
davide-f authored Jun 17, 2024
2 parents 5d1f86e + ef4dc69 commit 2c33699
Showing 4 changed files with 75 additions and 10 deletions.
1 change: 1 addition & 0 deletions Snakefile
@@ -44,6 +44,7 @@ RDIR = run["name"] + "/" if run.get("name") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""

load_data_paths = get_load_paths_gegis("data", config)

if config["enable"].get("retrieve_cost_data", True):
    COSTS = "resources/" + RDIR + "costs.csv"
else:
14 changes: 14 additions & 0 deletions doc/customization_basic1.rst
@@ -82,6 +82,20 @@ Year-related parameters are also being used when specifying `load_options`:
The `weather_year` value corresponds to the weather data used to generate the electricity demand profiles for a selected area, while `prediction_year` corresponds to a point on a `Shared Socioeconomic Pathways (SSP) <https://en.wikipedia.org/wiki/Shared_Socioeconomic_Pathways>`__ trajectory. PyPSA-Earth uses the SSP2-2.6 scenario within the Shared Socioeconomic Pathways framework, which is characterized by medium challenges to mitigation and adaptation efforts, resulting in global warming of approximately 2.6°C by the end of the 21st century.
The available values for `weather_year` and `prediction_year` can be checked by looking into the `pypsa-earth/data/ssp2-2.6` folder. Currently, pre-calculated demand data are available for the 2011, 2013 and 2018 weather years and for the 2030, 2040, 2050 and 2100 scenario prediction years.
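
For example, a `load_options` configuration consistent with the pre-calculated data could look as follows; this is a sketch showing only the keys read by `scripts/build_demand_profiles.py`, with illustrative values:

.. code:: yaml

    load_options:
      ssp: "ssp2-2.6"
      weather_year: 2013
      prediction_year: 2030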

Use custom demand data
----------------------

It is possible to use custom demand profiles. To do so, create a dedicated custom demand sub-folder in the scenario folder `pypsa-earth/data/ssp2-2.6` and place a custom demand file there. The name of the custom demand sub-folder should correspond to the `weather_year` argument, which in this case serves as a general identifier of the demand input. The name of the demand input file should be the name of the continent to which the country of interest belongs. Both csv and nc formats can be used for demand files.

For example, such a sub-folder could be `pypsa-earth/data/ssp2-2.6/2013_custom/`.

.. note::

    For example, to provide custom inputs for Nigeria, you can put the time series into an `Africa.csv` file and place the file into the `pypsa-earth/data/ssp2-2.6/2013_custom/` folder. For the file to be fetched, specify `weather_year: 2013_custom` under `load_options`.

The format of a custom csv demand file should correspond to the csv files supplied with the model: it must contain `region_code`, `time`, `region_name` and `Electricity demand` columns, and use a semicolon as the separator.
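
As an illustration, a custom file can be sanity-checked with pandas before running the workflow; a minimal sketch, where the path and the sample row in the comment are hypothetical and only the column names and separator follow the convention above:

.. code:: python

    import pandas as pd

    # A data row is expected to look like (semicolon-separated):
    # region_code;time;region_name;Electricity demand
    # NG;2013-01-01 00:00:00;Nigeria;12.3
    df = pd.read_csv("data/ssp2-2.6/2013_custom/Africa.csv", sep=";")
    assert {"region_code", "time", "region_name", "Electricity demand"}.issubset(df.columns)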


Configure `atlite` section
--------------------------

2 changes: 2 additions & 0 deletions doc/release_notes.rst
@@ -22,6 +22,8 @@ E.g. if a new rule becomes available describe how to use it `snakemake -j1 run_t

* Add an option to merge isolated networks into respective backbone networks by countries. `PR #903 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/903>`__

* Add an option to use csv format for custom demand imports. `PR #995 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/995>`__

**Minor Changes and bug-fixing**

* Minor bug-fixing to run the cluster wildcard min `PR #1019 <https://github.com/pypsa-meets-earth/pypsa-earth/pull/1019>`__
68 changes: 58 additions & 10 deletions scripts/build_demand_profiles.py
@@ -41,6 +41,7 @@
it returns a csv file called "demand_profiles.csv" that allocates the load to the buses of the network according to GDP and population.
"""
import os
import os.path
from itertools import product

import geopandas as gpd
@@ -49,7 +50,7 @@
import pypsa
import scipy.sparse as sparse
import xarray as xr
-from _helpers import configure_logging, create_logger, read_osm_config
+from _helpers import configure_logging, create_logger, read_csv_nafix, read_osm_config
from shapely.prepared import prep
from shapely.validation import make_valid

@@ -106,16 +107,34 @@ def get_load_paths_gegis(ssp_parentfolder, config):
    prediction_year = config.get("load_options")["prediction_year"]
    ssp = config.get("load_options")["ssp"]

    scenario_path = os.path.join(ssp_parentfolder, ssp)

    load_paths = []
+    load_dir = os.path.join(
+        ssp_parentfolder,
+        str(ssp),
+        str(prediction_year),
+        "era5_" + str(weather_year),
+    )
+
+    file_names = []
    for continent in region_load:
-        load_path = os.path.join(
-            ssp_parentfolder,
-            str(ssp),
-            str(prediction_year),
-            "era5_" + str(weather_year),
-            str(continent) + ".nc",
-        )
+        sel_ext = ".nc"
+        for ext in [".nc", ".csv"]:
+            load_path = os.path.join(str(load_dir), str(continent) + str(ext))
+            if os.path.exists(load_path):
+                sel_ext = ext
+                break
+        file_name = str(continent) + str(sel_ext)
+        load_path = os.path.join(str(load_dir), file_name)
        load_paths.append(load_path)
+        file_names.append(file_name)
+
+    logger.info(
+        f"Demand data folder: {load_dir}, load path is {load_paths}.\n"
+        + f"Expected files: "
+        + "; ".join(file_names)
+    )

    return load_paths

@@ -135,6 +154,23 @@ def shapes_to_shapes(orig, dest):
    return transfer


def load_demand_csv(path):
    # Read a semicolon-separated custom demand file and parse its time stamps
    df = read_csv_nafix(path, sep=";")
    df.time = pd.to_datetime(df.time, format="%Y-%m-%d %H:%M:%S")
    load_regions = {c: n for c, n in zip(df.region_code, df.region_name)}

    # Convert to an xarray dataset indexed by region_code and time, attaching
    # region_name as a coordinate so the result matches the .nc demand inputs
    gegis_load = df.set_index(["region_code", "time"]).to_xarray()
    gegis_load = gegis_load.assign_coords(
        {
            "region_name": (
                "region_code",
                [name for (code, name) in load_regions.items()],
            )
        }
    )
    return gegis_load


def build_demand_profiles(
    n,
    load_paths,
@@ -174,9 +210,21 @@
    substation_lv_i = n.buses.index[n.buses["substation_lv"]]
    regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i)
    load_paths = load_paths
-    # Merge load .nc files: https://stackoverflow.com/questions/47226429/join-merge-multiple-netcdf-files-using-xarray
-    gegis_load = xr.open_mfdataset(load_paths, combine="nested")
+
+    gegis_load_list = []
+
+    for path in load_paths:
+        if str(path).endswith(".csv"):
+            gegis_load_xr = load_demand_csv(path)
+        else:
+            # Merge load .nc files: https://stackoverflow.com/questions/47226429/join-merge-multiple-netcdf-files-using-xarray
+            gegis_load_xr = xr.open_mfdataset(path, combine="nested")
+        gegis_load_list.append(gegis_load_xr)
+
+    logger.info(f"Merging demand data from paths {load_paths} into the load data frame")
+    gegis_load = xr.merge(gegis_load_list)
    gegis_load = gegis_load.to_dataframe().reset_index().set_index("time")

    # filter load for analysed countries
    gegis_load = gegis_load.loc[gegis_load.region_code.isin(countries)]
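For context on the changes above: csv inputs are converted to xarray datasets so that csv and .nc demand files can be combined through the same `xr.merge` call. A minimal sketch of that round trip, assuming `scripts/` is on the Python path and using hypothetical file paths:

    import xarray as xr

    from build_demand_profiles import load_demand_csv

    # Each loader returns a dataset with region_code/time dimensions and a
    # region_name coordinate, so xr.merge can combine csv and .nc inputs
    # the same way build_demand_profiles() does.
    datasets = [
        load_demand_csv("data/ssp2-2.6/2013_custom/Africa.csv"),
        xr.open_mfdataset("data/ssp2-2.6/2030/era5_2013/Asia.nc", combine="nested"),
    ]
    gegis_load = xr.merge(datasets).to_dataframe().reset_index().set_index("time")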
