From 298d5979bb6aee03621616f362fa1322c03a6be0 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Wed, 31 Jan 2024 02:11:22 +0100 Subject: [PATCH 1/5] fixed warning while building docs --- doc/api_reference.rst | 6 -- doc/conf.py | 2 +- doc/installation.rst | 2 +- doc/plotting.rst | 2 +- doc/populate/build_demand_profiles.rst | 2 +- doc/requirements.txt | 20 +++++-- doc/solving/solve_operations_network.rst | 4 +- scripts/_helpers.py | 10 ++-- scripts/build_demand_profiles.py | 2 +- scripts/build_powerplants.py | 9 ++- scripts/build_shapes.py | 73 ++++++++++++++---------- scripts/clean_osm_data.py | 2 + scripts/download_osm_data.py | 2 +- scripts/make_statistics.py | 1 + scripts/make_summary.py | 30 ++++++---- scripts/monte_carlo.py | 14 +++-- 16 files changed, 108 insertions(+), 73 deletions(-) diff --git a/doc/api_reference.rst b/doc/api_reference.rst index 5cf8fdf3a..031b3c4d0 100644 --- a/doc/api_reference.rst +++ b/doc/api_reference.rst @@ -80,12 +80,6 @@ clean_osm_data .. automodule:: clean_osm_data :members: -config_osm_data -------------------------------- - -.. automodule:: config_osm_data - :members: - download_osm_data ------------------------------- diff --git a/doc/conf.py b/doc/conf.py index 5b27d20b1..8bd5c798a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -183,4 +183,4 @@ # man_show_urls = False # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {"https://docs.python.org/": None} +intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} diff --git a/doc/installation.rst b/doc/installation.rst index f9123dc02..51a37ef24 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -129,7 +129,7 @@ To confirm the installation, run the following command in the activated environm Solver Installation ------------------- +-------------------- An optimization solver is needed to solve the mathematical problem that is build with the automated workflow of PyPSA-Earth. 
With the goal of supporting completely open source initiative, we focus on relying on Open-Source solvers, such as diff --git a/doc/plotting.rst b/doc/plotting.rst index 723da0e0e..e10f83b47 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -41,7 +41,7 @@ Rule ``plot_p_nom_max`` | -.. automodule:: plot_p_nom_max +.. .. automodule:: plot_p_nom_max .. _summary: diff --git a/doc/populate/build_demand_profiles.rst b/doc/populate/build_demand_profiles.rst index 1d342ff86..eb3561ff8 100644 --- a/doc/populate/build_demand_profiles.rst +++ b/doc/populate/build_demand_profiles.rst @@ -5,7 +5,7 @@ .. _load_data: Rule ``build_demand_profiles`` -============================= +=============================== .. automodule:: build_demand_profiles diff --git a/doc/requirements.txt b/doc/requirements.txt index 5a58f40f9..d2b518fd8 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -10,11 +10,13 @@ myst-parser # recommark is deprecated, https://stackoverflow.com/a/71660856/135 pypsa vresutils>=0.3.1 -powerplantmatching>=0.4.8 -atlite>=0.2.2 -dask<=2021.3.1 +powerplantmatching>=0.5.5 +atlite>=0.2.9 +dask[distributed] +matplotlib<=3.5.2 +tabula-py -# cartopy +cartopy scikit-learn pycountry pyyaml @@ -23,9 +25,15 @@ memory_profiler tables descartes -esy-osm-pbf -esy-osmfilter rioxarray git+https://github.com/davide-f/google-drive-downloader@master # if not included will create error in docs `make html` gitpython + +chaospy +numba +ruamel.yaml<=0.17.26 +earth-osm>=0.1.0, <0.2.0 +reverse-geocode +pyDOE2 +# graphviz diff --git a/doc/solving/solve_operations_network.rst b/doc/solving/solve_operations_network.rst index 314ed1c26..c0be2f9b9 100644 --- a/doc/solving/solve_operations_network.rst +++ b/doc/solving/solve_operations_network.rst @@ -38,5 +38,5 @@ Rule ``solve_operations_network`` | -.. automodule:: solve_operations_network - :noindex: +.. .. automodule:: solve_operations_network +.. 
:noindex: diff --git a/scripts/_helpers.py b/scripts/_helpers.py index d936c4c47..ad229b002 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -459,13 +459,13 @@ def mock_snakemake(rulename, **wildcards): the snakemake project. It returns a snakemake.script.Snakemake object, based on the Snakefile. - If a rule has wildcards, you have to specify them in **wildcards. + If a rule has wildcards, you have to specify them in **wildcards**. Parameters ---------- rulename: str name of the rule for which the snakemake object should be generated - **wildcards: + wildcards: keyword arguments fixing the wildcards. Only necessary if wildcards are needed. """ @@ -718,7 +718,7 @@ def read_geojson(fn, cols=[], dtype=None, crs="EPSG:4326"): columns specified by the dtype dictionary it not none. Parameters: - ---------- + ------------ fn : str Path to the file to read cols : list @@ -742,12 +742,12 @@ def read_geojson(fn, cols=[], dtype=None, crs="EPSG:4326"): def create_country_list(input, iso_coding=True): """ - Create a country list for defined regions in config_osm_data.py. + Create a country list for defined regions in osm_config.yaml. Parameters ---------- input : str - Any two-letter country name, regional name, or continent given in config_osm_data.py + Any two-letter country name, regional name, or continent given in osm_config.yaml Country name duplications won't distort the result. Examples are: ["NG","ZA"], downloading osm data for Nigeria and South Africa diff --git a/scripts/build_demand_profiles.py b/scripts/build_demand_profiles.py index 6a76c2275..d03e7daa6 100644 --- a/scripts/build_demand_profiles.py +++ b/scripts/build_demand_profiles.py @@ -23,7 +23,7 @@ ------ - ``networks/base.nc``: confer :ref:`base`, a base PyPSA Network -- ``resources/bus_regions/regions_onshore.geojson``: confer :ref:`build_bus_regions` +- ``resources/bus_regions/regions_onshore.geojson``: confer :mod:`build_bus_regions` - ``load_data_paths``: paths to load profiles, e.g. 
hourly country load profiles produced by GEGIS - ``resources/shapes/gadm_shapes.geojson``: confer :ref:`shapes`, file containing the gadm shapes diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index edfc86ce0..1f76df39c 100644 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -40,6 +40,7 @@ ----------- The configuration options ``electricity: powerplants_filter`` and ``electricity: custom_powerplants`` can be used to control whether data should be retrieved from the original powerplants database or from custom amendmends. These specify `pandas.query `_ commands. + 1. Adding all powerplants from custom: .. code:: yaml @@ -69,20 +70,26 @@ custom_powerplants: YearCommissioned <= 2015 Format required for the custom_powerplants.csv should be similar to the powerplantmatching format with some additional considerations: + Columns required: [id, Name, Fueltype, Technology, Set, Country, Capacity, Efficiency, DateIn, DateRetrofit, DateOut, lat, lon, Duration, Volume_Mm3, DamHeight_m, StorageCapacity_MWh, EIC, projectID] Tagging considerations for columns in the file: + - FuelType: 'Natural Gas' has to be tagged either as 'OCGT', 'CCGT' - Technology: 'Reservoir' has to be set as 'ror' if hydro powerplants are to be considered as 'Generators' and not 'StorageUnits' -- Country: Country name has to be defined with its alpha2 code ('NG' for Nigeria,'BO' for Bolivia, 'FR' for France, etc.) +- Country: Country name has to be defined with its alpha2 code ('NG' for Nigeria,'BO' for Bolivia, 'FR' for France, etc. The following assumptions were done to map custom OSM-extracted power plants with powerplantmatching format. + 1. 
The benchmark PPM keys values were taken as follows: 'Fueltype': ['Hydro', 'Hard Coal', 'Natural Gas', 'Lignite', 'Nuclear', 'Oil', 'Bioenergy' 'Wind', 'Geothermal', 'Solar', 'Waste', 'Other'] + 'Technology': ['Reservoir', 'Pumped Storage', 'Run-Of-River', 'Steam Turbine', 'CCGT', 'OCGT' 'Pv', 'CCGT, Thermal', 'Offshore', 'Storage Technologies'] + 'Set': ['Store', 'PP', 'CHP'] + 2. OSM-extracted features were mapped into PPM ones using a (quite arbitrary) set of rules: 'coal': 'Hard Coal' 'wind_turbine': 'Onshore', diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index ccb303899..d0e7381aa 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -30,6 +30,9 @@ two_2_three_digits_country, two_digits_2_name_country, ) +from numba import njit +from numba.core import types +from numba.typed import Dict from rasterio.mask import mask from rasterio.windows import Window from shapely.geometry import LineString, MultiPolygon, Point, Polygon @@ -40,9 +43,6 @@ sets_path_to_root("pypsa-earth") -from numba import njit -from numba.core import types -from numba.typed import Dict logger = create_logger(__name__) @@ -360,11 +360,9 @@ def eez( tolerance=0.01, ): """ - Creates offshore shapes by. - - - buffer smooth countryshape (=offset country shape) - - and differ that with the offshore shape - Leads to for instance a 100m non-build coastline + Creates offshore shapes by buffer smooth countryshape (=offset country + shape) and differ that with the offshore shape which leads to for instance + a 100m non-build coastline. """ if out_logging: @@ -729,11 +727,13 @@ def process_function_population(row_id): Function that reads the task from df_tasks and executes all the methods. 
to obtain population values for the specified region - ------- + Inputs: + ------- row_id: integer which indicates a specific row of df_tasks - -------- + Outputs: + -------- windowed_pop_count: Dataframe containing "GADM_ID" and "pop" columns It represents the amount of population per region (GADM_ID), for the settings given by the row in df_tasks @@ -776,13 +776,15 @@ def process_function_population(row_id): def get_worldpop_val_xy(WorldPop_inputfile, window_dimensions): """ - Function to extract data from .tif input file - ------- + Function to extract data from .tif input file. + Inputs: + ------- WorldPop_inputfile: file location of worldpop file window_dimensions: dimensions of window used when reading file - -------- + Outputs: + -------- np_pop_valid: array filled with values for each nonzero pixel in the worldpop file np_pop_xy: array with [x,y] coordinates of the corresponding nonzero values in np_pop_valid """ @@ -820,16 +822,18 @@ def compute_geomask_region( country_rows, affine_transform, window_dimensions, latlong_topleft, latlong_botright ): """ - Function to mask geometries into np_map_ID using an incrementing counter - ------- + Function to mask geometries into np_map_ID using an incrementing counter. 
+ Inputs: + ------- country_rows: geoDataFrame filled with geometries and their GADM_ID affine_transform: affine transform of current window window_dimensions: dimensions of window used when reading file latlong_topleft: [latitude, longitude] of top left corner of the window latlong_botright: [latitude, longitude] of bottom right corner of the window - -------- + Outputs: + -------- np_map_ID.astype("H"): np_map_ID contains an ID for each location (undefined is 0) dimensions are taken from window_dimensions, .astype("H") for memory savings id_result: @@ -894,14 +898,16 @@ def sum_values_using_geomask(np_pop_val, np_pop_xy, region_geomask, id_mapping): GADM_ID It uses np_pop_xy to access the key stored in region_geomask[x][y] The relation of this key to GADM_ID is stored in id_mapping - ------- + Inputs: + ------- np_pop_val: array filled with values for each nonzero pixel in the worldpop file np_pop_xy: array with [x,y] coordinates of the corresponding nonzero values in np_pop_valid region_geomask: array with dimensions of window, values are keys that map to GADM_ID using id_mapping id_mapping: Dataframe that contains mappings of region_geomask values to GADM_IDs - -------- + Outputs: + -------- df_pop_count: Dataframe with columns - "GADM_ID" - "pop" containing population of GADM_ID region @@ -942,15 +948,17 @@ def loop_and_extact_val_x_y( population values from np_pop_val and stores them in np_pop_count. 
where each location in np_pop_count is mapped to a GADM_ID through dict_id (id_mapping by extension) - ------- + Inputs: + ------- np_pop_count: np.zeros array, which will store population counts np_pop_val: array filled with values for each nonzero pixel in the worldpop file np_pop_xy: array with [x,y] coordinates of the corresponding nonzero values in np_pop_valid region_geomask: array with dimensions of window, values are keys that map to GADM_ID using id_mapping dict_id: numba typed.dict containing id_mapping.index -> location in np_pop_count - -------- + Outputs: + -------- np_pop_count: np.array containing population counts """ # Loop the population data @@ -972,21 +980,21 @@ def calculate_transform_and_coords_for_window( ): """ Function which calculates the [lat,long] corners of the window given - window_dimensions, + window_dimensions, if not(original_window) it also changes the affine + transform to match the window. - if not(original_window) it also changes the affine transform to match the window - ------- Inputs: - current_transform: affine transform of source image - window_dimensions: dimensions of window used when reading file - original_window: boolean to track if window covers entire country - -------- + ------- + - current_transform: affine transform of source image + - window_dimensions: dimensions of window used when reading file + - original_window: boolean to track if window covers entire country + Outputs: + -------- A list of: [ adjusted_transform: affine transform adjusted to window coordinate_topleft: [latitude, longitude] of top left corner of the window - coordinate_botright: [latitude, longitude] of bottom right corner of the window - ] + coordinate_botright: [latitude, longitude] of bottom right corner of the window ] """ col_offset, row_offset, x_axis_len, y_axis_len = window_dimensions @@ -1028,13 +1036,15 @@ def generate_df_tasks(c_code, mem_read_limit_per_process, WorldPop_inputfile): Function to generate a list of tasks based on 
the memory constraints. One task represents a single window of the image - ------- + Inputs: + ------- c_code: country code mem_read_limit_per_process: memory limit for src.read() operation WorldPop_inputfile: file location of worldpop file - -------- + Outputs: + -------- Dataframe of task_list """ task_list = [] @@ -1125,6 +1135,7 @@ def add_population_data( shape is identified by summing over all pixels mapped to that region. This is performed with an iterative approach: + 1. All necessary WorldPop data tiff file are downloaded 2. The so-called windows are created to handle RAM limitations related to large WorldPop files. Large WorldPop files require significant RAM to handle, which may not be available, diff --git a/scripts/clean_osm_data.py b/scripts/clean_osm_data.py index 73cb52f31..f4cd3e5a4 100644 --- a/scripts/clean_osm_data.py +++ b/scripts/clean_osm_data.py @@ -454,6 +454,7 @@ def split_and_match_voltage_frequency_size(df): last value in the column. The function does as follows: + 1. First, it splits voltage and frequency columns by semicolon For example, the following lines row 1: '50', '220000 @@ -514,6 +515,7 @@ def fill_circuits(df): element matches the size of the list in the frequency column. Multiple procedure are adopted: + 1. In the rows of circuits where the number of elements matches the number of the frequency column, nothing is done 2. Where the number of elements in the cables column match the ones diff --git a/scripts/download_osm_data.py b/scripts/download_osm_data.py index ced394484..483e955a7 100644 --- a/scripts/download_osm_data.py +++ b/scripts/download_osm_data.py @@ -44,7 +44,7 @@ def country_list_to_geofk(country_list): Parameters ---------- input : str - Any two-letter country name or aggregation of countries given in config_osm_data.py + Any two-letter country name or aggregation of countries given in osm_config.yaml Country name duplications won't distort the result. 
Examples are: ["NG","ZA"], downloading osm data for Nigeria and South Africa diff --git a/scripts/make_statistics.py b/scripts/make_statistics.py index 22e367a44..ac84ced29 100644 --- a/scripts/make_statistics.py +++ b/scripts/make_statistics.py @@ -10,6 +10,7 @@ This script contains functions to create statistics of the workflow for the current execution Relevant statistics that are created are: + - For clean_osm_data and download_osm_data, the number of elements, length of the lines and length of dc lines are stored - For build_shapes, the surface, total GDP, total population and number of shapes are collected diff --git a/scripts/make_summary.py b/scripts/make_summary.py index f64027684..a52e86c21 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -9,7 +9,8 @@ Relevant Settings ----------------- -..code:: yaml +.. code:: yaml + costs: USD2013_to_EUR2013: discountrate: @@ -17,29 +18,38 @@ capital_cost: electricity: max_hours: -..seealso:: + +.. seealso:: Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`, :ref:`electricity_cf` + Inputs ------ + Outputs ------- + Description ----------- The following rule can be used to summarize the results in separate .csv files: -..code:: + +.. code:: bash + snakemake results/summaries/elec_s_all_lall_Co2L-3H_all clusters line volume or cost cap - options - all countries -the line volume/cost cap field can be set to one of the following: -* ``lv1.25`` for a particular line volume extension by 25% -* ``lc1.25`` for a line cost extension by 25 % -* ``lall`` for all evaluated caps -* ``lvall`` for all line volume caps -* ``lcall`` for all line cost caps -Replacing '/summaries/' with '/plots/' creates nice colored maps of the results. 
+ +The line volume/cost cap field can be set to one of the following: + +- ``lv1.25`` for a particular line volume extension by 25% +- ``lc1.25`` for a line cost extension by 25 % +- ``lall`` for all evaluated caps +- ``lvall`` for all line volume caps +- ``lcall`` for all line cost caps + +Replacing *summaries* with *plots* creates nice colored maps of the results. """ import logging import os diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 2338e892c..2797b5f85 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -20,14 +20,14 @@ seed: 42 # set seedling for reproducibilty uncertainties: loads_t.p_set: - type: uniform - args: [0, 1] + type: uniform + args: [0, 1] generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]: - type: lognormal - args: [1.5] + type: lognormal + args: [1.5] generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: - type: beta - args: [0.5, 2] + type: beta + args: [0.5, 2] .. seealso:: Documentation of the configuration file ``config.yaml`` at :ref:`monte_cf` @@ -39,6 +39,7 @@ Outputs ------- - ``networks/elec_s_10_ec_lcopt_Co2L-24H_{unc}.nc`` + e.g. networks/elec_s_10_ec_lcopt_Co2L-24H_m0.nc networks/elec_s_10_ec_lcopt_Co2L-24H_m1.nc ... @@ -210,6 +211,7 @@ def rescale_distribution( - latin_hypercube (np.array): The Latin hypercube sampling to be rescaled. - uncertainties_values (list): List of dictionaries containing distribution information. + Each dictionary should have 'type' key specifying the distribution type and 'args' key containing parameters specific to the chosen distribution. 
From 0f289c57b2d87aed4f7e1a102f6ce7d650eea23e Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Thu, 1 Feb 2024 12:24:23 +0100 Subject: [PATCH 2/5] removed extra comments in Monte-Carlo config --- config.default.yaml | 2 -- config.tutorial.yaml | 2 -- 2 files changed, 4 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index d70c3e88f..254caccd1 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -370,8 +370,6 @@ monte_carlo: generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: type: beta args: [0.5, 2] - # TODO: Support inputs to simulate outages biggest lines "lines.p_nom_opt.max()": [-3000MW 0MW] - # TODO: Support inputs to simulate outages of biggest power plant "generators.p_nom.max()": [-1000MW 0MW] diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 8ff9630cb..4f6328440 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -366,8 +366,6 @@ monte_carlo: generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: type: beta args: [0.5, 2] - # TODO: Support inputs to simulate outages biggest lines "lines.p_nom_opt.max()": [-3000MW 0MW] - # TODO: Support inputs to simulate outages of biggest power plant "generators.p_nom.max()": [-1000MW 0MW] solving: options: From b8fbbb48ec201cae8ced2e259b07f747b66e8b3b Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Sun, 4 Feb 2024 18:03:32 +0100 Subject: [PATCH 3/5] added monte-carlo into advanced usage --- doc/index.rst | 11 ++++++ doc/monte_carlo.rst | 83 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 doc/monte_carlo.rst diff --git a/doc/index.rst b/doc/index.rst index 969fbc3ca..3a7cf7b38 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -168,6 +168,17 @@ Documentation rules_overview api_reference +**Advanced Usage** + +* :doc:`monte_carlo` + +.. 
toctree:: + :hidden: + :maxdepth: 2 + :caption: Advanced Usage + + monte_carlo + **Help and References** * :doc:`release_notes` diff --git a/doc/monte_carlo.rst b/doc/monte_carlo.rst new file mode 100644 index 000000000..9248f090d --- /dev/null +++ b/doc/monte_carlo.rst @@ -0,0 +1,83 @@ +.. SPDX-FileCopyrightText: PyPSA-Earth and PyPSA-Eur Authors +.. +.. SPDX-License-Identifier: CC-BY-4.0 + +.. _monte_carlo: + +########################################## +Monte Carlo +########################################## + +The Monte Carlo method is a statistical technique that involves running +multiple simulations with randomly sampled input parameters to estimate +the distribution of the output variables. + +To use the Monte-Carlo method in PyPSA-Earth, you have to activate the +``monte_carlo`` option in the configuration file (``config.yaml``), +set ``add_to_snakefile`` to true in the monte_carlo section. This will +enable the monte-carlo method. + +There are a few additional options that needs to be set in the monte_carlo +configuration options. + +Options +------- +- ``samples``: The number of samples to be used in the monte-carlo simulation. +- ``samppling_strategy``: The method used to sample the input parameters. Either of ``pydoe2``, ``chaospy``, or ``scipy``. +- ``seed``: The seed for the random number generator. It is useful to set the seed to a fixed value to ensure reproducibility of the results. + +Uncertainties +------------- +The ``uncertainties`` section in the configuration file is used to specify the +parameters to be sampled in the monte-carlo simulation. The uncertainties +section is a dictionary with the keys being the ``pypsa object value`` to be +sampled and the values split into ``type`` and ``args`` of which ``type`` is used to +select the distribution and ``args`` used to specify the parameters of the selected +distribution type. + +The following is an example of the uncertainties section in the configuration file: + +.. 
code:: yaml + + uncertainties: + loads_t.p_set: + type: uniform + args: [0, 1] + generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]: + type: lognormal + args: [1.5] + generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: + type: beta + args: [0.5, 2] + +.. note:: + To understand the different distribution types and their parameters, + check `Scipy Documentation `_ + +Workflow +-------- + +To perform a dry-run of the monte-carlo simulation after setting the config options, use the following command: + +.. code:: bash + + .../pypsa-earth % snakemake -j 1 solve_all_networks_monte -n + +To create a DAG of the monte-carlo simulation workflow, use the following command: + +.. code:: bash + + .../pypsa-earth % snakemake -j 1 solve_all_networks_monte --dag | dot -Tpng > monte_carlo_workflow.png + +.. image:: img/monte_carlo_workflow.png + :align: center + +The monte-carlo simulation can be run using the following rule: + +.. code:: bash + + .../pypsa-earth % snakemake -j 1 solve_all_networks_monte + +.. note:: + Increasing the number of cores can make the process run faster. The numbers of cores can be increased by + setting the ``-j`` option to the desired number of cores. From 13a77e4452238feb9fef822e6f7dc48107a4a611 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Sun, 4 Feb 2024 22:51:37 +0100 Subject: [PATCH 4/5] included example and tip --- doc/monte_carlo.rst | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/doc/monte_carlo.rst b/doc/monte_carlo.rst index 9248f090d..e2770c017 100644 --- a/doc/monte_carlo.rst +++ b/doc/monte_carlo.rst @@ -12,6 +12,14 @@ The Monte Carlo method is a statistical technique that involves running multiple simulations with randomly sampled input parameters to estimate the distribution of the output variables. +In Africa, navigating decision-making amidst rapid demand growth and +substantial infrastructure changes poses a significant challenge. 
To +tackle this uncertainty, the Monte Carlo method can be employed to +construct a stochastic interpretation of deterministic model scenarios. +This approach enhances the ability to account for variability and assess +potential outcomes, providing a more robust foundation for informed +decision-making. + To use the Monte-Carlo method in PyPSA-Earth, you have to activate the ``monte_carlo`` option in the configuration file (``config.yaml``), set ``add_to_snakefile`` to true in the monte_carlo section. This will @@ -20,14 +28,14 @@ enable the monte-carlo method. There are a few additional options that needs to be set in the monte_carlo configuration options. -Options -------- +Set ``options`` ------------- - ``samples``: The number of samples to be used in the monte-carlo simulation. - ``samppling_strategy``: The method used to sample the input parameters. Either of ``pydoe2``, ``chaospy``, or ``scipy``. - ``seed``: The seed for the random number generator. It is useful to set the seed to a fixed value to ensure reproducibility of the results. -Uncertainties -------------- +Set ``uncertainties`` +--------------------- The ``uncertainties`` section in the configuration file is used to specify the parameters to be sampled in the monte-carlo simulation. The uncertainties section is a dictionary with the keys being the ``pypsa object value`` to be sampled and the values split into ``type`` and ``args`` of which ``type`` is used to select the distribution and ``args`` used to specify the parameters of the selected distribution type. @@ -54,6 +62,10 @@ The following is an example of the uncertainties section in the configuration fi To understand the different distribution types and their parameters, check `Scipy Documentation `_ +.. tip:: + To create realistic uncertainties, it is important to pay attention to + the distribution that is being applied to each ``pypsa object parameter``. 
+ Workflow -------- From fca87e0a9e31cc4403f66abc2c070451e88ce833 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Mon, 5 Feb 2024 00:29:33 +0100 Subject: [PATCH 5/5] added extra line --- doc/monte_carlo.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/monte_carlo.rst b/doc/monte_carlo.rst index e2770c017..426964f17 100644 --- a/doc/monte_carlo.rst +++ b/doc/monte_carlo.rst @@ -29,7 +29,7 @@ There are a few additional options that needs to be set in the monte_carlo configuration options. Set ``options`` -------------- +---------------- - ``samples``: The number of samples to be used in the monte-carlo simulation. - ``samppling_strategy``: The method used to sample the input parameters. Either of ``pydoe2``, ``chaospy``, or ``scipy``. - ``seed``: The seed for the random number generator. It is useful to set the seed to a fixed value to ensure reproducibility of the results.