diff --git a/Snakefile b/Snakefile
index 763ce2fd3..9948d364d 100644
--- a/Snakefile
+++ b/Snakefile
@@ -67,10 +67,10 @@ if config["custom_rules"] is not []:

 rule clean:
     run:
-        shell("snakemake -j 1 solve_all_networks --delete-all-output")
         try:
-            shell("snakemake -j 1 solve_all_networks_monte --delete-all-output")
+            shell("snakemake -j 1 solve_all_networks --delete-all-output")
         except:
+            shell("snakemake -j 1 solve_all_networks_monte --delete-all-output")
             pass
         shell("snakemake -j 1 run_all_scenarios --delete-all-output")

diff --git a/config.default.yaml b/config.default.yaml
index f062bbb75..d70c3e88f 100644
--- a/config.default.yaml
+++ b/config.default.yaml
@@ -340,18 +340,39 @@ costs:

 monte_carlo:
+  # Description: Specify Monte Carlo sampling options for uncertainty analysis.
+  # Define the option list for Monte Carlo sampling.
+  # Make sure add_to_snakefile is set to true to enable Monte-Carlo
   options:
-    add_to_snakefile: false
-    samples: 7 # number of optimizations
-    sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
-  pypsa_standard:
-    # User can add here flexibly more features for the Monte-Carlo sampling.
-    # Given as "key: value" format
-    # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering!
-    # Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object
-    loads_t.p_set: [0.9, 1.1]
-    # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
-    # generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
+    add_to_snakefile: false # when set to true, enables Monte Carlo sampling
+    samples: 9 # number of optimizations; note that when using scipy, the number of samples has to be the square of a prime number
+    sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
+    seed: 42 # set the seed for reproducibility
+  # Uncertainties on any PyPSA object are specified by declaring the specific PyPSA object under the key 'uncertainties'.
+  # For each PyPSA object, the 'type' and 'args' keys represent the type of distribution and its arguments, respectively.
+  # Supported distribution types are uniform, normal, lognormal, triangle, beta and gamma.
+  # The arguments of the distribution are passed using the key 'args', tailored by distribution type as follows:
+  # normal: [mean, std], lognormal: [shape], uniform: [lower_bound, upper_bound],
+  # triangle: [mid_point (between 0 - 1)], beta: [alpha, beta], gamma: [shape, scale]
+  # More information on the distributions is documented in the Chaospy reference guide:
+  # https://chaospy.readthedocs.io/en/master/reference/distribution/index.html
+  # An abstract example is as follows:
+  # {pypsa network object, e.g. "loads_t.p_set"}:
+  #   type: {any supported distribution among the previous: "uniform", "normal", ...}
+  #   args: {arguments passed as a list depending on the distribution, see the above and more at https://pypsa.readthedocs.io/}
+  uncertainties:
+    loads_t.p_set:
+      type: uniform
+      args: [0, 1]
+    generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]:
+      type: lognormal
+      args: [1.5]
+    generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]:
+      type: beta
+      args: [0.5, 2]
+  # TODO: Support inputs to simulate outages of the biggest lines "lines.p_nom_opt.max()": [-3000MW, 0MW]
+  # TODO: Support inputs to simulate outages of the biggest power plant "generators.p_nom.max()": [-1000MW, 0MW]
+
 solving:
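For orientation (not part of the patch): a minimal sketch of what one `uncertainties` entry means. Each sampled value in [0, 1] is mapped through the declared distribution and then applied as a multiplicative factor to the named PyPSA object; all numbers here are illustrative.

```python
import numpy as np
from scipy.stats import beta

rng = np.random.default_rng(42)
u = rng.uniform(size=5)        # stand-in for one LHS column in [0, 1]
factors = beta.ppf(u, 0.5, 2)  # "type: beta, args: [0.5, 2]" from the config above

p_set = np.full(3, 100.0)      # stand-in for n.loads_t.p_set values in MW
scaled = p_set * factors[0]    # sample 0 rescales the load time series
```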
"loads_t.p_set"}: + # type: {any supported distribution among the previous: "uniform", "normal", ...} + # args: {arguments passed as a list depending on the distribution, see the above and more at https://pypsa.readthedocs.io/} + uncertainties: + loads_t.p_set: + type: uniform + args: [0, 1] + generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]: + type: lognormal + args: [1.5] + generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: + type: beta + args: [0.5, 2] + # TODO: Support inputs to simulate outages biggest lines "lines.p_nom_opt.max()": [-3000MW 0MW] + # TODO: Support inputs to simulate outages of biggest power plant "generators.p_nom.max()": [-1000MW 0MW] + solving: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 47092ff55..8ff9630cb 100644 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -336,19 +336,38 @@ costs: monte_carlo: + # Description: Specify Monte Carlo sampling options for uncertainty analysis. + # Define the option list for Monte Carlo sampling. + # Make sure add_to_snakefile is set to true to enable Monte-Carlo options: - add_to_snakefile: false - samples: 7 # number of optimizations - sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported - pypsa_standard: - # User can add here flexibly more features for the Monte-Carlo sampling. - # Given as "key: value" format - # Key: add below the pypsa object for the monte_carlo sampling, "network" is only allowed for filtering! - # Value: currently supported format [l_bound, u_bound] or empty [], represent multiplication factors for the object - loads_t.p_set: [0.9, 1.1] - # generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1] - # generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1] - + add_to_snakefile: false # When set to true, enables Monte Carlo sampling + samples: 9 # number of optimizations. Note that number of samples when using scipy has to be the square of a prime number + sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported + seed: 42 # set seedling for reproducibilty + # Uncertanties on any PyPSA object are specified by declaring the specific PyPSA object under the key 'uncertainties'. + # For each PyPSA object, the 'type' and 'args' keys represent the type of distribution and its argument, respectively. + # Supported distributions types are uniform, normal, lognormal, triangle, beta and gamma. + # The arguments of the distribution are passed using the key 'args' as follows, tailored by distribution type + # normal: [mean, std], lognormal: [mean, std], uniform: [lower_bound, upper_bound], + # triangle: [mid_point (between 0 - 1)], beta: [alpha, beta], gamma: [shape, scale] + # More info on the distributions are documented in the Chaospy reference guide... + # https://chaospy.readthedocs.io/en/master/reference/distribution/index.html + # An abstract example is as follows: + # {pypsa network object, e.g. 
"loads_t.p_set"}: + # type: {any supported distribution among the previous: "uniform", "normal", ...} + # args: {arguments passed as a list depending on the distribution, see the above and more at https://pypsa.readthedocs.io/} + uncertainties: + loads_t.p_set: + type: uniform + args: [0, 1] + generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]: + type: lognormal + args: [1.5] + generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: + type: beta + args: [0.5, 2] + # TODO: Support inputs to simulate outages biggest lines "lines.p_nom_opt.max()": [-3000MW 0MW] + # TODO: Support inputs to simulate outages of biggest power plant "generators.p_nom.max()": [-1000MW 0MW] solving: options: diff --git a/doc/api_reference.rst b/doc/api_reference.rst index b5832936b..5cf8fdf3a 100644 --- a/doc/api_reference.rst +++ b/doc/api_reference.rst @@ -103,3 +103,9 @@ make_statistics .. automodule:: make_statistics :members: + +monte_carlo +------------------------------- + +.. automodule:: monte_carlo + :members: diff --git a/doc/configtables/monte-carlo.csv b/doc/configtables/monte-carlo.csv index efde31bd0..bc922f2f3 100644 --- a/doc/configtables/monte-carlo.csv +++ b/doc/configtables/monte-carlo.csv @@ -1,7 +1,10 @@ ,Unit,Values,Description -options,,, --- add_to_snakemake,,true or false,Add rule to Snakefile or not --- samples,,"int** read description!", "Defines the number of total sample networks that will be later optimized. if orthogonal latin hypercube sampling is applied (default option in scripts), then a prime number, that is not a quadrat of a prime, needs to be chosen. E.g. 7 not 9 (3^2)" --- sampling_strategy,,"Any subset of {""pydoe2"", ""chaospy"", ""scipy""}",Current supported packages to create an experimental design -pypsa_standard,,, --- ,MW/MWh,"[l_bound, u_bound] or empty []","`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object." +**options**,,, +add_to_snakemake,,"true or false","Set to true to enable Monte-Carlo" +samples,,"int","Defines the number of total sample networks that will be optimized. If the chosen sampling strategy is scipy, then a square of a prime number needs to be chosen. E.g. 49 which is (7^2)" +sampling_strategy,,"Any subset of {pydoe2, chaospy, scipy}","Current supported packages to create an experimental design" +seed,,"int","Allows experimentation to be reproduced easily" +**uncertainties**,,, +,MW/MWh,,"`Key` is a dynamic PyPSA object that allows to access any pypsa object such as `loads_t.p_set` or the max. wind generation per hour `generators_t.p_max_pu.loc[:, n.generators.carrier == ""wind""]`. `Values` or bounds are multiplication for each object." +type,,"str","Defines the distribution for the chosen pypsa.object parameter. Distribution can be either uniform, normal, lognormal, triangle, beta or gamma" +args,,"list","Defines parameters for the chosen distribution. [mean, std] for normal and lognormal, [lower_bound, upper_bound] for uniform, [mid_point (between 0 - 1)] for triangle, [alpha, beta] for beta, [shape, scale] for gamma" diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py index 5eecb276c..2338e892c 100644 --- a/scripts/monte_carlo.py +++ b/scripts/monte_carlo.py @@ -14,18 +14,23 @@ monte_carlo: options: - add_to_snakefile: - samples: - sampling_strategy: - pypsa_standard: - - Examples... 
diff --git a/scripts/monte_carlo.py b/scripts/monte_carlo.py
index 5eecb276c..2338e892c 100644
--- a/scripts/monte_carlo.py
+++ b/scripts/monte_carlo.py
@@ -14,18 +14,23 @@

 monte_carlo:
     options:
-        add_to_snakefile:
-        samples:
-        sampling_strategy:
-        pypsa_standard:
-
-    Examples...
-    loads_t.p_set: [0.9, 1.1]
-    generators_t.p_max_pu.loc[:, n.generators.carrier == "wind"]: [0.9, 1.1]
-    generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]: [0.9, 1.1]
+        add_to_snakefile: false # when set to true, enables Monte Carlo sampling
+        samples: 9 # number of optimizations; when using scipy, this has to be the square of a prime number
+        sampling_strategy: "chaospy" # "pydoe2", "chaospy", "scipy", packages that are supported
+        seed: 42 # set the seed for reproducibility
+    uncertainties:
+        loads_t.p_set:
+          type: uniform
+          args: [0, 1]
+        generators_t.p_max_pu.loc[:, n.generators.carrier == "onwind"]:
+          type: lognormal
+          args: [1.5]
+        generators_t.p_max_pu.loc[:, n.generators.carrier == "solar"]:
+          type: beta
+          args: [0.5, 2]

 .. seealso::
-    Documentation of the configuration file ``config.yaml`` at :ref:`_monte_cf`
+    Documentation of the configuration file ``config.yaml`` at :ref:`monte_cf`

 Inputs
 ------
@@ -52,13 +57,11 @@
 supported by the packages pyDOE2, chaospy and scipy. This should give users the freedom to
 explore alternative approaches. The orthogonal latin hypercube sampling is thereby found as most
 performant, hence, implemented here. Sampling the multi-dimensional uncertainty space is relatively
-easy. It only requires two things: The number of *samples* (e.g. PyPSA networks) and *features* (e.g.
-load or solar timeseries). This results in an experimental design of the dimension (samples X features).
+easy. It only requires two things: the number of *samples* (the total number of networks to
+be optimized) and the *features* (PyPSA network objects, e.g. loads_t.p_set or generators_t.p_max_pu). This
+results in an experimental design of the dimension (samples X features).
-Additionally, upper and lower bounds *per feature* need to be provided such that the experimental
-design can be scaled accordingly. Currently the user can define uncertainty ranges e.g. bounds,
-for all PyPSA objects that are `int` or `float`. Boolean values could be used but require testing.
-The experimental design `lhs_scaled` (dimension: sample X features) is then used to modify the PyPSA
+The experimental design `lh` (dimension: sample X features) is used to modify the PyPSA
 networks. Thereby, this script creates samples x amount of networks. The iterators comes from
 the wildcard {unc}, which is described in the config.yaml and created in the Snakefile as a range from
 0 to (total number of) SAMPLES.
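A compact sketch (not part of the patch) of that experimental design, following the same chaospy route as the sampling functions below:

```python
import chaospy
from scipy.stats import qmc

N_FEATURES, SAMPLES = 3, 9
uniform_cube = chaospy.J(*[chaospy.Uniform(0, 1) for _ in range(N_FEATURES)])
lh = uniform_cube.sample(SAMPLES, rule="latin_hypercube", seed=42).T
print(lh.shape)             # (9, 3) -> one row per network to be built
print(qmc.discrepancy(lh))  # lower discrepancy = more uniform coverage
```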
@@ -70,22 +73,26 @@
 import numpy as np
 import pandas as pd
 import pypsa
+import seaborn as sns
 from _helpers import configure_logging, create_logger
 from pyDOE2 import lhs
-from scipy.stats import qmc
+from scipy.stats import beta, gamma, lognorm, norm, qmc, triang
+from sklearn.preprocessing import MinMaxScaler
 from solve_network import *

 logger = create_logger(__name__)
+sns.set(style="whitegrid")


 def monte_carlo_sampling_pydoe2(
-    N_FEATURES,
-    SAMPLES,
-    criterion=None,
-    iteration=None,
-    random_state=42,
-    correlation_matrix=None,
-):
+    N_FEATURES: int,
+    SAMPLES: int,
+    uncertainties_values: dict,
+    random_state: int,
+    criterion: str = None,
+    iteration: int = None,
+    correlation_matrix: np.ndarray = None,
+) -> np.ndarray:
     """
     Creates Latin Hypercube Sample (LHS) implementation from PyDOE2 with
     various options. Additionally all "corners" are simulated.
@@ -93,6 +100,8 @@ def monte_carlo_sampling_pydoe2(
     Adapted from Disspaset: https://github.com/energy-modelling-toolkit/Dispa-SET/blob/master/scripts/build_and_run_hypercube.py
     Documentation on PyDOE2: https://github.com/clicumu/pyDOE2 (fixes latin_cube errors)
     """
+    from pyDOE2 import lhs
+    from scipy.stats import qmc

     # Generate a Nfeatures-dimensional latin hypercube varying between 0 and 1:
     lh = lhs(
@@ -103,42 +112,64 @@ def monte_carlo_sampling_pydoe2(
         random_state=random_state,
         correlation_matrix=correlation_matrix,
     )
+
+    lh = rescale_distribution(lh, uncertainties_values)
     discrepancy = qmc.discrepancy(lh)
-    logger.info(f"Hypercube discrepancy is: {discrepancy}")
+    logger.info(
+        f"Discrepancy is: {discrepancy}, more details in the function documentation."
+    )

     return lh


-def monte_carlo_sampling_chaospy(N_FEATURES, SAMPLES, rule="latin_hypercube", seed=42):
+def monte_carlo_sampling_chaospy(
+    N_FEATURES: int,
+    SAMPLES: int,
+    uncertainties_values: dict,
+    seed: int,
+    rule: str = "latin_hypercube",
+) -> np.ndarray:
     """
     Creates Latin Hypercube Sample (LHS) implementation from chaospy.

     Documentation on Chaospy: https://github.com/clicumu/pyDOE2 (fixes latin_cube errors)
     Documentation on Chaospy latin-hyper cube (quasi-Monte Carlo method):
     https://chaospy.readthedocs.io/en/master/user_guide/fundamentals/quasi_random_samples.html#Quasi-random-samples
     """
-    # Generate a Nfeatures-dimensional latin hypercube varying between 0 and 1:
+    import chaospy
+    from scipy.stats import qmc
+
+    # generate a Nfeatures-dimensional latin hypercube varying between 0 and 1:
     N_FEATURES = "chaospy.Uniform(0, 1), " * N_FEATURES
     uniform_cube = eval(
         f"chaospy.J({N_FEATURES})"
     )  # writes Nfeatures times the chaospy.uniform... command)
-    lh = np.atleast_2d(uniform_cube.sample(SAMPLES, rule=rule, seed=seed)).T
+    lh = uniform_cube.sample(SAMPLES, rule=rule, seed=seed).T
+
+    lh = rescale_distribution(lh, uncertainties_values)
     discrepancy = qmc.discrepancy(lh)
-    logger.info(f"Hypercube discrepancy is: {discrepancy}")
+    logger.info(
+        f"Discrepancy is: {discrepancy}, more details in the function documentation."
+    )

     return lh


 def monte_carlo_sampling_scipy(
-    N_FEATURES, SAMPLES, centered=False, strength=2, optimization=None, seed=42
-):
+    N_FEATURES: int,
+    SAMPLES: int,
+    uncertainties_values: dict,
+    seed: int,
+    strength: int = 2,
+    optimization: str = None,
+) -> np.ndarray:
     """
     Creates Latin Hypercube Sample (LHS) implementation from SciPy with various options:

     - Center the point within the multi-dimensional grid, centered=True
-    - optimization scheme, optimization="random-cd"
-    - strength=1, classical LHS
-    - strength=2, performant orthogonal LHS, requires the sample to be a prime or square of a prime e.g. sqr(121)=11
+    - Optimization scheme, optimization="random-cd"
+    - Strength=1, classical LHS
+    - Strength=2, performant orthogonal LHS, requires the sample to be the square of a prime, e.g. 11^2 = 121

     Options could be combined to produce an optimized centered orthogonal array
     based LHS. After optimization, the result would not be guaranteed to be of strength 2.
@@ -147,20 +178,173 @@ def monte_carlo_sampling_scipy(
     Documentation for Latin Hypercube: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.qmc.LatinHypercube.html#scipy.stats.qmc.LatinHypercube
     Orthogonal LHS is better than basic LHS: https://github.com/scipy/scipy/pull/14546/files, https://en.wikipedia.org/wiki/Latin_hypercube_sampling
     """
+    from scipy.stats import qmc
+
     sampler = qmc.LatinHypercube(
         d=N_FEATURES,
-        centered=centered,
         strength=strength,
         optimization=optimization,
         seed=seed,
     )
+
     lh = sampler.random(n=SAMPLES)
+
+    lh = rescale_distribution(lh, uncertainties_values)
     discrepancy = qmc.discrepancy(lh)
-    logger.info(f"Hypercube discrepancy is: {discrepancy}")
+    logger.info(
+        f"Discrepancy is: {discrepancy}, more details in the function documentation."
+    )

     return lh


+def rescale_distribution(
+    latin_hypercube: np.ndarray, uncertainties_values: dict
+) -> np.ndarray:
+    """
+    Rescales a Latin hypercube sampling (LHS) using specified distribution
+    parameters. More information on the distributions can be found here:
+    https://docs.scipy.org/doc/scipy/reference/stats.html
+
+    **Parameters**:
+
+    - latin_hypercube (np.array): The Latin hypercube sampling to be rescaled.
+    - uncertainties_values (list): List of dictionaries containing distribution information.
+      Each dictionary should have a 'type' key specifying the distribution type and an 'args' key
+      containing the parameters specific to the chosen distribution.
+
+    **Returns**:
+
+    - np.array: Rescaled Latin hypercube sampling with values in the range [0, 1].
+
+    **Supported Distributions**:
+
+    - "uniform": Rescaled to the specified lower and upper bounds.
+    - "normal": Rescaled using the inverse of the normal distribution function with specified mean and std.
+    - "lognormal": Rescaled using the inverse of the log-normal distribution function with the specified shape.
+    - "triangle": Rescaled using the inverse of the triangular distribution function with the given mid-point.
+    - "beta": Rescaled using the inverse of the beta distribution function with specified shape parameters.
+    - "gamma": Rescaled using the inverse of the gamma distribution function with specified shape and scale parameters.
+
+    **Note**:
+
+    - The function supports rescaling for uniform, normal, lognormal, triangle, beta, and gamma distributions.
+    - The rescaled samples will have values in the range [0, 1].
+    """
+    from scipy.stats import beta, gamma, lognorm, norm, qmc, triang
+    from sklearn.preprocessing import MinMaxScaler, minmax_scale
+
+    for idx, value in enumerate(uncertainties_values):
+        dist = value.get("type")
+        params = value.get("args")
+
+        match dist:
+            case "uniform":
+                l_bounds, u_bounds = params
+                latin_hypercube[:, idx] = minmax_scale(
+                    latin_hypercube[:, idx], feature_range=(l_bounds, u_bounds)
+                )
+            case "normal":
+                mean, std = params
+                latin_hypercube[:, idx] = norm.ppf(latin_hypercube[:, idx], mean, std)
+            case "lognormal":
+                shape = params[0]
+                latin_hypercube[:, idx] = lognorm.ppf(latin_hypercube[:, idx], s=shape)
+            case "triangle":
+                mid_point = params[0]
+                latin_hypercube[:, idx] = triang.ppf(latin_hypercube[:, idx], mid_point)
+            case "beta":
+                a, b = params
+                latin_hypercube[:, idx] = beta.ppf(latin_hypercube[:, idx], a, b)
+            case "gamma":
+                shape, scale = params
+                latin_hypercube[:, idx] = gamma.ppf(
+                    latin_hypercube[:, idx], shape, scale
+                )
+
+    # the sample space needs to be from 0 to 1
+    mm = MinMaxScaler(feature_range=(0, 1), clip=True)
+    latin_hypercube = mm.fit_transform(latin_hypercube)
+
+    return latin_hypercube
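A standalone sketch (not part of the patch) of what `rescale_distribution` does per column: push a uniform LHS column through the target distribution's inverse CDF, then squeeze the result back into [0, 1]:

```python
import numpy as np
from scipy.stats import lognorm
from sklearn.preprocessing import MinMaxScaler

u = np.linspace(0.05, 0.95, 9).reshape(-1, 1)  # one uniform LHS column
x = lognorm.ppf(u, s=1.5)                      # "type: lognormal, args: [1.5]"
x = MinMaxScaler(feature_range=(0, 1), clip=True).fit_transform(x)
print(x.ravel())  # lognormally shaped multipliers, rescaled to [0, 1]
```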
+ """ + from scipy.stats import beta, gamma, lognorm, norm, qmc, triang + from sklearn.preprocessing import MinMaxScaler, minmax_scale + + for idx, value in enumerate(uncertainties_values): + dist = value.get("type") + params = value.get("args") + + match dist: + case "uniform": + l_bounds, u_bounds = params + latin_hypercube[:, idx] = minmax_scale( + latin_hypercube[:, idx], feature_range=(l_bounds, u_bounds) + ) + case "normal": + mean, std = params + latin_hypercube[:, idx] = norm.ppf(latin_hypercube[:, idx], mean, std) + case "lognormal": + shape = params[0] + latin_hypercube[:, idx] = lognorm.ppf(latin_hypercube[:, idx], s=shape) + case "triangle": + mid_point = params[0] + latin_hypercube[:, idx] = triang.ppf(latin_hypercube[:, idx], mid_point) + case "beta": + a, b = params + latin_hypercube[:, idx] = beta.ppf(latin_hypercube[:, idx], a, b) + case "gamma": + shape, scale = params + latin_hypercube[:, idx] = gamma.ppf( + latin_hypercube[:, idx], shape, scale + ) + + # samples space needs to be from 0 to 1 + mm = MinMaxScaler(feature_range=(0, 1), clip=True) + latin_hypercube = mm.fit_transform(latin_hypercube) + + return latin_hypercube + + +def validate_parameters( + sampling_strategy: str, samples: int, uncertainties_values: dict +) -> None: + """ + Validates the parameters for a given probability distribution. Inputs from + user through the config file needs to be validated before proceeding to + perform monte-carlo simulations. + + **Parameters**: + + - sampling_strategy: str + The chosen sampling strategy from chaospy, scipy and pydoe2 + - samples: int + The number of samples to generate for the simulation + - distribution: str + The name of the probability distribution. + - distribution_params: list + The parameters associated with the probability distribution. + + **Raises**: + + - ValueError: If the parameters are invalid for the specified distribution. + """ + + valid_strategy = ["chaospy", "scipy", "pydoe2"] + valid_distribution = ["uniform", "normal", "lognormal", "triangle", "beta", "gamma"] + + # verifying samples and distribution_params + if samples is None: + raise ValueError(f"assign a value to samples") + elif type(samples) is float or type(samples) is str: + raise ValueError(f"samples must be an integer") + + # verify sampling strategy + if sampling_strategy not in valid_strategy: + raise ValueError( + f" Invalid sampling strategy: {sampling_strategy}. Choose from {valid_strategy}" + ) + + for idx, value in enumerate(uncertainties_values): + dist_type = value.get("type") + param = value.get("args") + + if dist_type is None or len(param) == 0: + raise ValueError(f"assign a list of parameters to distribution_params") + + if dist_type not in valid_distribution: + raise ValueError( + f"Unsupported Distribution : {dist_type}. 

 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -177,14 +361,12 @@ def monte_carlo_sampling_scipy(
     configure_logging(snakemake)
     monte_carlo_config = snakemake.params.monte_carlo

-    ### SCENARIO INPUTS
+    # SCENARIO INPUTS ###
-
-    MONTE_CARLO_PYPSA_FEATURES = {
-        k: v for k, v in monte_carlo_config["pypsa_standard"].items() if v
-    }  # removes key value pairs with empty value e.g. []
+    MONTE_CARLO_PYPSA_FEATURES = [
+        k for k in monte_carlo_config["uncertainties"].keys() if k
+    ]  # the list of PyPSA objects to be varied
     MONTE_CARLO_OPTIONS = monte_carlo_config["options"]
-    L_BOUNDS = [item[0] for item in MONTE_CARLO_PYPSA_FEATURES.values()]
-    U_BOUNDS = [item[1] for item in MONTE_CARLO_PYPSA_FEATURES.values()]
     N_FEATURES = len(
         MONTE_CARLO_PYPSA_FEATURES
     )  # only counts features when specified in config
@@ -192,56 +374,65 @@ def monte_carlo_sampling_scipy(
     SAMPLES = MONTE_CARLO_OPTIONS.get(
         "samples"
     )  # TODO: What is the optimal sampling? Fabian Neumann answered that in "Broad ranges" paper
     SAMPLING_STRATEGY = MONTE_CARLO_OPTIONS.get("sampling_strategy")
+    UNCERTAINTIES_VALUES = monte_carlo_config["uncertainties"].values()
+    SEED = MONTE_CARLO_OPTIONS.get("seed")
+
+    # PARAMETERS VALIDATION
+    # validate the parameters supplied in the config file
+    validate_parameters(SAMPLING_STRATEGY, SAMPLES, UNCERTAINTIES_VALUES)

-    ### SCENARIO CREATION / SAMPLING STRATEGY
+    # SCENARIO CREATION / SAMPLING STRATEGY ###
     if SAMPLING_STRATEGY == "pydoe2":
         lh = monte_carlo_sampling_pydoe2(
             N_FEATURES,
             SAMPLES,
+            UNCERTAINTIES_VALUES,
+            random_state=SEED,
             criterion=None,
             iteration=None,
-            random_state=42,
             correlation_matrix=None,
         )
     if SAMPLING_STRATEGY == "scipy":
         lh = monte_carlo_sampling_scipy(
             N_FEATURES,
             SAMPLES,
-            centered=False,
+            UNCERTAINTIES_VALUES,
+            seed=SEED,
             strength=2,
             optimization=None,
-            seed=42,
         )
     if SAMPLING_STRATEGY == "chaospy":
         lh = monte_carlo_sampling_chaospy(
-            N_FEATURES,
-            SAMPLES,
-            rule="latin_hypercube",
-            seed=42,
+            N_FEATURES, SAMPLES, UNCERTAINTIES_VALUES, seed=SEED, rule="latin_hypercube"
         )
-    lh_scaled = qmc.scale(lh, L_BOUNDS, U_BOUNDS)

-    ### MONTE-CARLO MODIFICATIONS
+    # create plots of the rescaled distributions (for development usage, commented out by default)
+    # for idx in range(N_FEATURES):
+    #     sns.displot(lh[:, idx], kde=True).set(
+    #         title=f"{MONTE_CARLO_PYPSA_FEATURES[idx]}"
+    #     ).figure.savefig(f"{MONTE_CARLO_PYPSA_FEATURES[idx]}.png", bbox_inches="tight")
+
+    # MONTE-CARLO MODIFICATIONS ###
     n = pypsa.Network(snakemake.input[0])
     unc_wildcards = snakemake.wildcards[-1]
     i = int(unc_wildcards[1:])
     j = 0
-    for k, v in MONTE_CARLO_PYPSA_FEATURES.items():
+    for k in MONTE_CARLO_PYPSA_FEATURES:
         # this loop sets in one scenario each "i" feature assumption
         # k is the config input key "loads_t.p_set"
-        # v is the lower and upper bound [0.8,1.3], that was used for lh_scaled
-        # i, j interaction number to pick values of experimental setup
-        # Example: n.loads_t.p_set = network.loads_t.p_set = .loads_t.p_set * lh_scaled[0,0]
-        exec(f"n.{k} = n.{k} * {lh_scaled[i,j]}")
-        logger.info(f"Scaled n.{k} by factor {lh_scaled[i,j]} in the {i} scenario")
+        # i, j are the row (scenario) and column (feature) indices of the experimental design
+        # Example: n.loads_t.p_set = n.loads_t.p_set * lh[0,0]
+        exec(f"n.{k} = n.{k} * {lh[i,j]}")
+        logger.info(f"Scaled n.{k} by factor {lh[i,j]} in the {i} scenario")
         j = j + 1

-    ### EXPORT AND METADATA
+    # EXPORT AND METADATA ###
     latin_hypercube_dict = (
-        pd.DataFrame(lh_scaled).rename_axis("Nruns").add_suffix("_feature")
+        pd.DataFrame(lh).rename_axis("Nruns").add_suffix("_feature")
     ).to_dict()
     n.meta.update(latin_hypercube_dict)
     n.export_to_netcdf(snakemake.output[0])
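Finally, a minimal standalone sketch (hypothetical numbers; a stub object stands in for the PyPSA network) of the modification step at the end of the script:

```python
import numpy as np
import pandas as pd


class Stub:  # minimal stand-in for a pypsa.Network in this sketch
    pass


n = Stub()
n.loads_t = Stub()
n.loads_t.p_set = pd.DataFrame(np.full((3, 2), 100.0))

lh = np.array([[0.95, 1.08], [1.02, 0.91]])  # (samples x features) design
i = int("m1"[1:])  # scenario index parsed from a {unc} wildcard such as "m1"
for j, k in enumerate(["loads_t.p_set"]):
    exec(f"n.{k} = n.{k} * {lh[i, j]}")  # scale the object by its sampled factor
print(n.loads_t.p_set.iloc[0, 0])  # 100.0 * 1.02 = 102.0
```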