diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a243a304b..8ab61e1e7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -31,8 +31,8 @@ jobs: env_file: envs/linux-pinned.yaml - os: macos env_file: envs/macos-pinned.yaml - # - os: windows - # env_file: envs/windows-pinned.yaml + - os: windows + env_file: envs/windows-pinned.yaml defaults: run: diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e04e28caf..2bac06440 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -4,6 +4,9 @@ version: 2 +sphinx: + configuration: doc/conf.py + build: os: ubuntu-22.04 tools: diff --git a/README.md b/README.md index 6de82672e..d9325b303 100644 --- a/README.md +++ b/README.md @@ -58,41 +58,22 @@ The diagram below depicts one representative clustered node for the sector-coupl -## Get involved - -There are multiple ways to get involved and learn more about our work. That's how we organise ourselves: - -- [**Discord NEW! (Open)**](https://discord.gg/AnuJBk23FU) - - chat with the community, team up on features, exchange with developers, code in voice channels - - registration and usage is for free -

-- **General initiative meeting (Open)** - every forth Thursday each month Thursday 16-17:00 (UK time) - `download .ics` - - join for project news and high-level code updates - meeting hosted on Discord - [open agenda](https://docs.google.com/document/d/1r6wm2RBe0DWFngmItpFfSFHA-CnUmVcVTkIKmthdW3g/edit?usp=sharing). See what we will discuss. Invited members have edit rights. -- **Buddy talk (Open)** - book a 30min meeting with Max to discuss anything you like - booking link: [calendly.com/pypsa-meets-earth](https://calendly.com/max-parzen/pypsa-meets-earth-exchange-30min) -- **Specific code meeting (Open)** - meeting hosted on Discord - join updates, demos, Q&A's, discussions and the coordination of each work package - 1. Demand creation and prediction meeting, on demand - 2. AI asset detection meeting, on demand - 3. Sector coupling meeting, every Thursday 09:00 (UK time), `download .ics` - 4. PyPSA-Earth meeting, every Thursday 16:00 (UK time), `download .ics` -- **Outreach meeting (Open)** - every second week, Tuesday 17:00 (UK time) - planning, discussing events, workshops, communication, community activities -- [**Google Drive**](https://drive.google.com/drive/folders/13Z8Y9zgsh5IZaDNkkRyo1wkoMgbdUxT5?usp=sharing) - access to minutes, presentations, lists, documents (access to minutes) +## How to get involved + +There are multiple ways to get involved and learn more about our work: +1. **Join our forum** and communication platform on the [**PyPSA-meets-Earth**](https://discord.gg/AnuJBk23FU) Discord Server +2. **Chat on Discord with us** in the following open meetings: + - **General initiative meeting** for project news and [high-level code updates](https://docs.google.com/document/d/1r6wm2RBe0DWFngmItpFfSFHA-CnUmVcVTkIKmthdW3g/edit?usp=sharing). Held every [fourth Thursday 16-17:00 (UK time)](https://drive.google.com/file/d/1naH4WwW9drkOkOJ3PLO4fyWdkZQi5-_w/view?usp=share_link), it is a perfect place to meet the community and get a high-level update on the PyPSA ecosystem relevant to PyPSA-Earth developments. + - **Weekly developer meetings** + - Eastern-Hemisphere friendly *Morning meeting* every [Thursday at 09:00 (UK time)](https://drive.google.com/file/d/1PDdmjsKhzyGRo0_YrP4wPQkn2XTNh6jA/view?usp=share_link). + - Western-Hemisphere friendly *Evening meeting* every [Thursday 16:00 (UK time)](https://drive.google.com/file/d/1gaLmyV4qGPXsogkeRcAPWjC0ESebUxU-/view?usp=share_link). Every fourth Thursday is replaced by the General initiative meeting, which has a more high-level perspective, but you can also join it to discuss more specific questions. +3. **Look at public materials** on [**Google Drive**](https://drive.google.com/drive/folders/13Z8Y9zgsh5IZaDNkkRyo1wkoMgbdUxT5?usp=sharing), where we share minutes, presentations, lists and documents. Feel free to take a look! +4. **Notify your interest** in on-demand meetings: + - Demand creation and prediction meeting + - AI asset detection meeting + - Outreach meeting for planning, discussing events, workshops, communication, community activities +5. Join us and **propose your stream**. ## Installation @@ -140,10 +121,10 @@ There are multiple ways to get involved and learn more about our work. That's ho ## Running the model in previous versions -The model can be run in previous versions by checking out the respective tag. 
For instance, to run the model in version 0.4.1, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used: +The model can be run in previous versions by checking out the respective tag. For instance, to run the model in version 0.6.0, which is the last version before the repo `pypsa-earth-sec` was merged, the following command can be used: ```bash -git checkout v0.4.1 +git checkout v0.6.0 ``` After checking out the tag, the model can be run as usual. Please make sure to install the required packages for the respective version. diff --git a/Snakefile b/Snakefile index 3b078bd3c..b54a82e7a 100644 --- a/Snakefile +++ b/Snakefile @@ -16,6 +16,7 @@ from _helpers import ( get_last_commit_message, check_config_version, copy_default_files, + BASE_DIR, ) from build_demand_profiles import get_load_paths_gegis from retrieve_databundle_light import datafiles_retrivedatabundle @@ -552,6 +553,7 @@ rule add_electricity: rule simplify_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], renewable=config["renewable"], geo_crs=config["crs"]["geo_crs"], cluster_options=config["cluster_options"], @@ -594,6 +596,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == True: rule cluster_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], build_shape_options=config["build_shape_options"], electricity=config["electricity"], costs=config["costs"], @@ -679,6 +682,7 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == False: rule cluster_network: params: + aggregation_strategies=config["cluster_options"]["aggregation_strategies"], build_shape_options=config["build_shape_options"], electricity=config["electricity"], costs=config["costs"], @@ -732,7 +736,10 @@ if config["augmented_line_connection"].get("add_to_snakefile", False) == False: rule add_extra_components: + params: + transmission_efficiency=config["sector"]["transmission_efficiency"], input: + overrides="data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", tech_costs=COSTS, output: @@ -806,6 +813,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == False: solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: + overrides=BASE_DIR + "/data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", output: "results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", @@ -872,6 +880,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: + overrides=BASE_DIR + "/data/override_component_attrs", network="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{unc}.nc", @@ -901,7 +910,7 @@ if config["monte_carlo"]["options"].get("add_to_snakefile", False) == True: resources: mem_mb=memory, shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" script: "scripts/solve_network.py" @@ -1141,7 +1150,7 @@ rule build_ship_profile: rule add_export: params: - gadm_level=config["sector"]["gadm_level"], + gadm_layer_id=config["build_shape_options"]["gadm_layer_id"], alternative_clustering=config["cluster_options"]["alternative_clustering"], store=config["export"]["store"], store_capital_costs=config["export"]["store_capital_costs"], @@ -1615,7 +1624,7 @@ if config["foresight"] == "overnight": 
solving=config["solving"], augmented_line_connection=config["augmented_line_connection"], input: - overrides="data/override_component_attrs", + overrides=BASE_DIR + "/data/override_component_attrs", # network=RESDIR # + "prenetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}.nc", network=RESDIR @@ -1626,7 +1635,7 @@ if config["foresight"] == "overnight": RESDIR + "postnetworks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc", shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" log: solver=RESDIR + "logs/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export_solver.log", @@ -1824,7 +1833,7 @@ rule prepare_db: rule build_industrial_distribution_key: #default data params: countries=config["countries"], - gadm_level=config["sector"]["gadm_level"], + gadm_layer_id=config["build_shape_options"]["gadm_layer_id"], alternative_clustering=config["cluster_options"]["alternative_clustering"], industry_database=config["custom_data"]["industry_database"], input: @@ -2081,7 +2090,7 @@ if config["foresight"] == "myopic": "co2_sequestration_potential", 200 ), input: - overrides="data/override_component_attrs", + overrides=BASE_DIR + "/data/override_component_attrs", network=RESDIR + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.nc", costs=CDIR + "costs_{planning_horizons}.csv", @@ -2092,7 +2101,7 @@ if config["foresight"] == "myopic": # config=RESDIR # + "configs/config.elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export.yaml", shadow: - "shallow" + "copy-minimal" if os.name == "nt" else "shallow" log: solver=RESDIR + "logs/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{sopts}_{planning_horizons}_{discountrate}_{demand}_{h2export}export_solver.log", diff --git a/config.default.yaml b/config.default.yaml index 5318af083..56e03a56e 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -98,6 +98,7 @@ cluster_options: p_nom_max: sum p_nom_min: sum p_min_pu: mean + p_max_pu: weighted_average marginal_cost: mean committable: any ramp_limit_up: max @@ -602,6 +603,9 @@ sector: transmission_efficiency: electricity distribution grid: efficiency_static: 0.97 # efficiency of distribution grid (i.e. 3% loses) + H2 pipeline: + efficiency_per_1000km: 1 + compression_per_1000km: 0.017 # DEA technology data. Mean of Energy losses, lines 5000-20000 MW and lines >20000 MW for 2020, 2030 and 2050, [%/1000 km] dynamic_transport: enable: false # If "True", then the BEV and FCEV shares are obtained depending on the "Co2L"-wildcard (e.g. "Co2L0.70: 0.10"). If "False", then the shares are obtained depending on the "demand" wildcard and "planning_horizons" wildcard as listed below (e.g. 
"DF_2050: 0.08") @@ -657,7 +661,6 @@ sector: co2_network: true co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2 - hydrogen_underground_storage: true shipping_hydrogen_liquefaction: false shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011 @@ -672,7 +675,6 @@ sector: NZ_2050: 0.36 DF_2050: 0.12 - gadm_level: 1 h2_cavern: true marginal_cost_storage: 0 methanation: true @@ -695,7 +697,6 @@ sector: biomass: biomass keep_existing_capacities: true - solving: options: formulation: kirchhoff diff --git a/doc/index.rst b/doc/index.rst index 92e17a55a..25e43dfcb 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -78,39 +78,19 @@ The `website `_ provides more context of t Get Involved ============== -The PyPSA meets Earth team is currently running four types of meetings: - - -- `**Discord NEW! (Open)** `_ - - - Chat with the community, team up on features, exchange with developers, code in voice channels - -- **General code meeting (Open)** - - - every forth Thursday each month 16-17:00 (UK time) `download .ics file `_ - - updates on overall project and code blocks - - meeting hosted on `Discord `_; join us, we are waiting for you! - - `open agenda `_. See what we will discuss. Invited members have edit rights. - -- **Specific code meeting (by invitation)** - - - meeting hosted on Discord - - join updates, demos, Q&A's, discussions and the coordination of each work package - - 1. Demand creation and prediction meeting, on demand basis - 2. AI asset detection meeting, on demand basis - 3. Sector coupling meeting, every Thursday 09:00 (UK time), `download .ics file `__ - 4. PyPSA-Earth meeting, every Thursday 16:00 (UK time), `download .ics file `__ - -- **Outreach meeting (by invitation)** - - - every second week - - planning, discussing events, workshops, communication, community activities - -- **Buddy talk (Open)** - - - book a 30min meeting with Max to discuss anything you like - - booking link: `calendly.com/pypsa-meets-earth `_ +There are multiple ways to get involved and learn more about our work: + 1. **Join our forum** and communication platform on `PyPSA-meets-Earth `__ Discord Server + 2. **Chat on Discord with us** in the following meetings: + - General initiative meeting for project news and `high-level code updates `__. Held every `fourth Thursday 16-17:00 (UK time) `__ and is a perfect place to meet the community and get a high-level update on PyPSA ecosystem relevant for PyPSA-Earth developments. + - Weekly developers meetings + - Eastern-Hemisphere friendly *Morning meeting* every `Thursday at 09:00 (UK time) `__. + - Western-Hemisphere friendly *Evening meeting* every `Thursday 16:00 (UK time) `__. Every forth Thursday is replaced by the General initiative meeting which has a more high-level perspective, but you can also join to discuss more particular questions. + 3. **Look at public materials** at `google Drive `__ to share to minutes, presentations, lists and documents. Feel gree to get a look! + 4. **Notify your interest** to on-demand meetings: + - Demand creation and prediction meeting + - AI asset detection meeting + - Outreach meeting for planning, discussing events, workshops, communication, community activities + 5. Join us and **propose your stream**. 
============= Documentation diff --git a/doc/release_notes.rst b/doc/release_notes.rst index f53037d72..b28b545b5 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -17,12 +17,24 @@ This part of documentation collects descriptive release notes to capture the mai * In alternative clustering, generate hydro inflows by shape and avoid hydro inflows duplication for plants installed in the same node `PR #1120 ` +* Add a function to calculate length-based efficiencies and apply it to the H2 pipelines. `PR #1192 `__ + +* Support of Linopy for Power and Sector-Coupled Modelling and latest PyPSA version `PR #1172 `__ + +* Update workflow to geopandas >= 1.0 `PR #1276 `__ + **Minor Changes and bug-fixing** * Prevent computation of powerplantmatching if replace option is selected for custom_powerplants `PR #1281 `__ * Fix overlapping bus regions when alternative clustering is selected `PR #1287 `__ +* Fix readthedocs by explicitly specifying the location of the Sphinx config `PR #1292 `__ + +* Fix lossy bidirectional links, especially H2 pipelines, which would sometimes gain H2 instead of losing it. `PR #1192 `__ + +* Fix the need for administrative rights on Windows by changing all shadow directory settings for Windows in the Snakefile `PR #1295 `__ and `PR #1301 `__ + PyPSA-Earth 0.6.0 ================= @@ -148,6 +160,7 @@ PyPSA-Earth 0.4.0 * Add an option to use csv format for custom demand imports. `PR #995 `__ + **Minor Changes and bug-fixing** * Minor bug-fixing to run the cluster wildcard min `PR #1019 `__ diff --git a/envs/environment.yaml b/envs/environment.yaml index 4c172b36e..c336868b1 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -12,7 +12,8 @@ dependencies: - pip - mamba # esp for windows build -- pypsa>=0.24, <0.25 +- pypsa>=0.25 +# - atlite>=0.2.4 # until https://github.com/PyPSA/atlite/issues/244 is not merged - dask # currently the packages are being installed with pip # need to move back to conda once the issues will be resolved @@ -28,13 +29,14 @@ dependencies: - memory_profiler - ruamel.yaml<=0.17.26 - pytables +- pyscipopt # added to compy with the quadratic objective requirement of the clustering script - lxml - numpy # starting from 1.3.5 numpoly requires numpy>2.0 which leads to issues - numpoly<=1.3.4 - pandas -- geopandas>=0.11.0, <=0.14.3 -- fiona!=1.8.22, <1.10.0 +- geopandas>=1 +- fiona>=1.10 - xarray>=2023.11.0, <2023.12.0 - netcdf4 - networkx @@ -49,6 +51,7 @@ dependencies: - pyogrio - numba - py7zr +- tsam>=1.1.0 # Keep in conda environment when calling ipython - ipython @@ -80,11 +83,9 @@ dependencies: # Default solver for tests (required for CI) - glpk -- ipopt - gurobi - pip: - git+https://github.com/davide-f/google-drive-downloader@master # google drive with fix for virus scan - - tsam>=1.1.0 - chaospy # lastest version only available on pip - fake_useragent diff --git a/envs/linux-pinned.yaml b/envs/linux-pinned.yaml index 53b2b8f48..f148d16fa 100644 --- a/envs/linux-pinned.yaml +++ b/envs/linux-pinned.yaml @@ -17,7 +17,7 @@ dependencies: - alsa-lib=1.2.13 - ampl-asl=1.0.0 - amply=0.1.6 -- anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - argon2-cffi=23.1.0 - argon2-cffi-bindings=21.2.0 @@ -37,9 +37,9 @@ dependencies: - aws-c-io=0.15.3 - aws-c-mqtt=0.11.0 - aws-c-s3=0.7.7 -- aws-c-sdkutils=0.2.1 +- aws-c-sdkutils=0.2.2 - aws-checksums=0.2.2 -- aws-crt-cpp=0.29.7 +- aws-crt-cpp=0.29.8 - aws-sdk-cpp=1.11.458 - azure-core-cpp=1.14.0 - azure-identity-cpp=1.10.0 @@ -49,6 +49,7 @@ dependencies: - babel=2.16.0 - beautifulsoup4=4.12.3 - 
bleach=6.2.0 +- bleach-with-css=6.2.0 - blosc=1.21.6 - bokeh=3.6.2 - bottleneck=1.4.2 @@ -70,13 +71,13 @@ dependencies: - certifi=2024.12.14 - cffi=1.17.1 - cfgv=3.3.1 -- cfitsio=4.4.1 +- cfitsio=4.5.0 - cftime=1.6.4 -- charset-normalizer=3.4.0 +- charset-normalizer=3.4.1 - click=8.1.8 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=3.1.0 +- cloudpickle=3.1.1 - coin-or-cbc=2.10.12 - coin-or-cgl=0.60.9 - coin-or-clp=1.17.10 @@ -92,13 +93,14 @@ dependencies: - contourpy=1.3.1 - country_converter=1.2 - cpp-expected=1.1.0 +- cppad=20240000.7 - cycler=0.12.1 - cyrus-sasl=2.1.27 - cytoolz=1.0.1 - dask=2024.12.1 - dask-core=2024.12.1 - dask-expr=1.1.21 -- datapi=0.1.1 +- datapi=0.1.2 - datashader=0.16.3 - datrie=0.8.2 - dbus=1.13.6 @@ -119,9 +121,9 @@ dependencies: - executing=2.1.0 - expat=2.6.4 - filelock=3.16.1 -- fiona=1.9.6 +- fiona=1.10.1 - fmt=11.0.2 -- folium=0.19.2 +- folium=0.19.4 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -140,8 +142,8 @@ dependencies: - gdk-pixbuf=2.42.12 - geographiclib=2.0 - geojson-rewind=1.1.0 -- geopandas=0.14.3 -- geopandas-base=0.14.3 +- geopandas=1.0.1 +- geopandas-base=1.0.1 - geopy=2.4.1 - geos=3.13.0 - geotiff=1.7.3 @@ -151,8 +153,8 @@ dependencies: - gettext-tools=0.22.5 - gflags=2.2.2 - giflib=5.2.2 -- gitdb=4.0.11 -- gitpython=3.1.43 +- gitdb=4.0.12 +- gitpython=3.1.44 - glib=2.82.2 - glib-tools=2.82.2 - glog=0.7.1 @@ -167,9 +169,10 @@ dependencies: - gurobi=12.0.0 - h11=0.14.0 - h2=4.1.0 -- harfbuzz=10.1.0 +- harfbuzz=10.2.0 - hdf4=4.2.15 - hdf5=1.14.3 +- highspy=1.9.0 - holoviews=1.20.0 - hpack=4.0.0 - httpcore=1.0.7 @@ -178,11 +181,11 @@ dependencies: - hvplot=0.11.2 - hyperframe=6.0.1 - icu=75.1 -- identify=2.6.3 +- identify=2.6.5 - idna=3.10 - importlib-metadata=8.5.0 - importlib_metadata=8.5.0 -- importlib_resources=6.4.5 +- importlib_resources=6.5.2 - inflate64=1.0.1 - iniconfig=2.0.0 - ipopt=3.14.17 @@ -207,7 +210,7 @@ dependencies: - jupyterlab=4.3.4 - jupyterlab_pygments=0.3.0 - jupyterlab_server=2.27.3 -- kealib=1.6.0 +- kealib=1.6.1 - keyutils=1.6.1 - kiwisolver=1.4.7 - krb5=1.21.3 @@ -230,14 +233,14 @@ dependencies: - libbrotlienc=1.1.0 - libcap=2.71 - libcblas=3.9.0 -- libclang-cpp19.1=19.1.6 -- libclang13=19.1.6 +- libclang-cpp19.1=19.1.7 +- libclang13=19.1.7 - libcrc32c=1.1.2 - libcups=2.3.3 - libcurl=8.11.1 - libdeflate=1.23 - libdrm=2.4.124 -- libedit=3.1.20191231 +- libedit=3.1.20240808 - libegl=1.7.0 - libev=4.33 - libevent=2.1.12 @@ -272,8 +275,8 @@ dependencies: - libglvnd=1.7.0 - libglx=1.7.0 - libgomp=14.2.0 -- libgoogle-cloud=2.32.0 -- libgoogle-cloud-storage=2.32.0 +- libgoogle-cloud=2.33.0 +- libgoogle-cloud-storage=2.33.0 - libgpg-error=1.51 - libgrpc=1.67.1 - libhwloc=2.11.2 @@ -283,7 +286,7 @@ dependencies: - liblapack=3.9.0 - liblapacke=3.9.0 - libllvm14=14.0.6 -- libllvm19=19.1.6 +- libllvm19=19.1.7 - liblzma=5.6.3 - liblzma-devel=5.6.3 - libmamba=2.0.5 @@ -296,9 +299,9 @@ dependencies: - libopus=1.3.1 - libparquet=18.1.0 - libpciaccess=0.18 -- libpng=1.6.44 +- libpng=1.6.45 - libpq=17.2 -- libprotobuf=5.28.2 +- libprotobuf=5.28.3 - libre2-11=2024.07.02 - librsvg=2.58.4 - librttopo=1.1.0 @@ -306,14 +309,13 @@ dependencies: - libsndfile=1.2.2 - libsodium=1.0.20 - libsolv=0.7.30 -- libspatialindex=2.1.0 - libspatialite=5.1.0 - libspral=2024.05.08 - libsqlite=3.47.2 - libssh2=1.11.1 - libstdcxx=14.2.0 - libstdcxx-ng=14.2.0 -- libsystemd0=256.9 +- libsystemd0=257.2 - libthrift=0.21.0 - libtiff=4.7.0 - libutf8proc=2.9.0 @@ -349,7 +351,8 @@ dependencies: - 
mercantile=1.2.1 - metis=5.1.0 - minizip=4.0.7 -- mistune=3.0.2 +- mistune=3.1.0 +- mpfr=4.2.1 - mpg123=1.32.9 - msgpack-python=1.1.0 - multipledispatch=0.6.0 @@ -361,7 +364,7 @@ dependencies: - mysql-common=9.0.1 - mysql-libs=9.0.1 - nbclient=0.10.2 -- nbconvert-core=7.16.4 +- nbconvert-core=7.16.5 - nbformat=5.10.4 - ncurses=6.5 - nest-asyncio=1.6.0 @@ -387,7 +390,7 @@ dependencies: - pandas=2.2.2 - pandocfilters=1.5.0 - panel=1.5.5 -- pango=1.54.0 +- pango=1.56.0 - param=2.2.0 - parso=0.8.4 - partd=1.4.2 @@ -395,7 +398,7 @@ dependencies: - pcre2=10.44 - pexpect=4.9.0 - pickleshare=0.7.5 -- pillow=11.0.0 +- pillow=11.1.0 - pip=24.3.1 - pixman=0.44.2 - pkgutil-resolve-name=1.3.10 @@ -407,14 +410,14 @@ dependencies: - poppler=24.12.0 - poppler-data=0.4.12 - postgresql=17.2 -- powerplantmatching=0.6.0 +- powerplantmatching=0.6.1 - pre-commit=4.0.1 - progressbar2=4.5.0 - proj=9.5.1 - prometheus_client=0.21.1 - prompt-toolkit=3.0.48 -- protobuf=5.28.2 -- psutil=6.1.0 +- protobuf=5.28.3 +- psutil=6.1.1 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 @@ -430,15 +433,16 @@ dependencies: - pycryptodomex=3.21.0 - pyct=0.5.0 - pydoe2=1.3.0 -- pygments=2.18.0 +- pygments=2.19.1 - pyogrio=0.10.0 -- pyomo=6.8.2 -- pyparsing=3.2.0 +- pyomo=6.6.1 +- pyparsing=3.2.1 - pyppmd=1.1.0 - pyproj=3.7.0 -- pypsa=0.24.0 +- pypsa=0.28.0 - pyqt=5.15.9 - pyqt5-sip=12.12.2 +- pyscipopt=5.2.1 - pyshp=2.3.1 - pysocks=1.7.1 - pytables=3.10.1 @@ -451,7 +455,7 @@ dependencies: - python-utils=3.9.1 - python_abi=3.10 - pytz=2024.2 -- pyviz_comms=3.0.3 +- pyviz_comms=3.0.4 - pyyaml=6.0.2 - pyzmq=26.2.0 - pyzstd=0.16.2 @@ -469,18 +473,18 @@ dependencies: - rfc3986-validator=0.1.1 - rioxarray=0.17.0 - rpds-py=0.22.3 -- rtree=1.3.0 - ruamel.yaml=0.17.26 - ruamel.yaml.clib=0.2.8 - s2n=1.5.10 -- scikit-learn=1.6.0 -- scipy=1.14.1 +- scikit-learn=1.6.1 +- scip=9.2.0 +- scipy=1.15.1 - seaborn=0.13.2 - seaborn-base=0.13.2 - send2trash=1.8.3 -- setuptools=75.6.0 +- setuptools=75.8.0 - shapely=2.0.6 -- simdjson=3.11.3 +- simdjson=3.11.6 - sip=6.7.12 - six=1.17.0 - smart_open=7.1.0 @@ -491,12 +495,13 @@ dependencies: - snuggs=1.4.7 - sortedcontainers=2.4.0 - soupsieve=2.5 -- spdlog=1.14.1 +- spdlog=1.15.0 - sqlite=3.47.2 - stack_data=0.6.3 - statsmodels=0.14.4 - stopit=1.1.2 - tabulate=0.9.0 +- tbb=2022.0.0 - tblib=3.0.0 - terminado=0.18.1 - texttable=1.7.0 @@ -512,6 +517,7 @@ dependencies: - tornado=6.4.2 - tqdm=4.67.1 - traitlets=5.14.3 +- tsam=2.3.6 - types-python-dateutil=2.9.0.20241206 - typing-extensions=4.12.2 - typing_extensions=4.12.2 @@ -520,19 +526,19 @@ dependencies: - tzdata=2024b - uc-micro-py=1.0.3 - ukkonen=1.0.1 -- unicodedata2=15.1.0 +- unicodedata2=16.0.0 - unidecode=1.3.8 - uri-template=1.3.0 - uriparser=0.9.8 - urllib3=2.3.0 - validators=0.34.0 -- virtualenv=20.28.0 +- virtualenv=20.29.0 - wcwidth=0.2.13 - webcolors=24.11.1 - webencodings=0.5.1 - websocket-client=1.8.0 - wheel=0.45.1 -- wrapt=1.17.0 +- wrapt=1.17.2 - xarray=2023.11.0 - xcb-util=0.4.1 - xcb-util-image=0.4.0 @@ -564,13 +570,11 @@ dependencies: - zipfile-deflate64=0.2.0 - zipp=3.21.0 - zlib=1.3.1 -- zlib-ng=2.2.2 +- zlib-ng=2.2.3 - zstandard=0.23.0 - zstd=1.5.6 - pip: - chaospy==4.3.17 - fake-useragent==2.0.3 - googledrivedownloader==0.4 - - highspy==1.9.0 - - tsam==2.3.6 prefix: /usr/share/miniconda/envs/pypsa-earth diff --git a/envs/macos-pinned.yaml b/envs/macos-pinned.yaml index 38c73a2fc..ba162351c 100644 --- a/envs/macos-pinned.yaml +++ b/envs/macos-pinned.yaml @@ -14,7 +14,7 @@ dependencies: - affine=2.4.0 - ampl-asl=1.0.0 - amply=0.1.6 -- 
anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - appnope=0.1.4 - argon2-cffi=23.1.0 @@ -34,9 +34,9 @@ dependencies: - aws-c-io=0.15.3 - aws-c-mqtt=0.11.0 - aws-c-s3=0.7.7 -- aws-c-sdkutils=0.2.1 +- aws-c-sdkutils=0.2.2 - aws-checksums=0.2.2 -- aws-crt-cpp=0.29.7 +- aws-crt-cpp=0.29.8 - aws-sdk-cpp=1.11.458 - azure-core-cpp=1.14.0 - azure-identity-cpp=1.10.0 @@ -46,6 +46,7 @@ dependencies: - babel=2.16.0 - beautifulsoup4=4.12.3 - bleach=6.2.0 +- bleach-with-css=6.2.0 - blosc=1.21.6 - bokeh=3.6.2 - bottleneck=1.4.2 @@ -67,13 +68,13 @@ dependencies: - certifi=2024.12.14 - cffi=1.17.1 - cfgv=3.3.1 -- cfitsio=4.4.1 +- cfitsio=4.5.0 - cftime=1.6.4 -- charset-normalizer=3.4.0 +- charset-normalizer=3.4.1 - click=8.1.8 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=3.1.0 +- cloudpickle=3.1.1 - coin-or-cbc=2.10.12 - coin-or-cgl=0.60.9 - coin-or-clp=1.17.10 @@ -89,13 +90,14 @@ dependencies: - contourpy=1.3.1 - country_converter=1.2 - cpp-expected=1.1.0 +- cppad=20240000.7 - cycler=0.12.1 - cyrus-sasl=2.1.27 - cytoolz=1.0.1 - dask=2024.12.1 - dask-core=2024.12.1 - dask-expr=1.1.21 -- datapi=0.1.1 +- datapi=0.1.2 - datashader=0.16.3 - datrie=0.8.2 - debugpy=1.8.11 @@ -114,9 +116,9 @@ dependencies: - exceptiongroup=1.2.2 - executing=2.1.0 - filelock=3.16.1 -- fiona=1.9.6 +- fiona=1.10.1 - fmt=11.0.2 -- folium=0.19.2 +- folium=0.19.4 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -135,8 +137,8 @@ dependencies: - gdk-pixbuf=2.42.12 - geographiclib=2.0 - geojson-rewind=1.1.0 -- geopandas=0.14.3 -- geopandas-base=0.14.3 +- geopandas=1.0.1 +- geopandas-base=1.0.1 - geopy=2.4.1 - geos=3.13.0 - geotiff=1.7.3 @@ -144,8 +146,8 @@ dependencies: - geoviews-core=1.14.0 - gflags=2.2.2 - giflib=5.2.2 -- gitdb=4.0.11 -- gitpython=3.1.43 +- gitdb=4.0.12 +- gitpython=3.1.44 - glog=0.7.1 - glpk=5.0 - gmp=6.3.0 @@ -156,9 +158,10 @@ dependencies: - gurobi=12.0.0 - h11=0.14.0 - h2=4.1.0 -- harfbuzz=10.1.0 +- harfbuzz=10.2.0 - hdf4=4.2.15 - hdf5=1.14.3 +- highspy=1.9.0 - holoviews=1.20.0 - hpack=4.0.0 - httpcore=1.0.7 @@ -167,11 +170,11 @@ dependencies: - hvplot=0.11.2 - hyperframe=6.0.1 - icu=75.1 -- identify=2.6.3 +- identify=2.6.5 - idna=3.10 - importlib-metadata=8.5.0 - importlib_metadata=8.5.0 -- importlib_resources=6.4.5 +- importlib_resources=6.5.2 - inflate64=1.0.1 - iniconfig=2.0.0 - ipopt=3.14.17 @@ -196,7 +199,7 @@ dependencies: - jupyterlab=4.3.4 - jupyterlab_pygments=0.3.0 - jupyterlab_server=2.27.3 -- kealib=1.6.0 +- kealib=1.6.1 - kiwisolver=1.4.7 - krb5=1.21.3 - lcms2=2.16 @@ -215,9 +218,9 @@ dependencies: - libcblas=3.9.0 - libcrc32c=1.1.2 - libcurl=8.11.1 -- libcxx=19.1.6 +- libcxx=19.1.7 - libdeflate=1.23 -- libedit=3.1.20191231 +- libedit=3.1.20240808 - libev=4.33 - libevent=2.1.12 - libexpat=2.6.4 @@ -240,9 +243,10 @@ dependencies: - libgfortran=5.0.0 - libgfortran5=13.2.0 - libglib=2.82.2 -- libgoogle-cloud=2.32.0 -- libgoogle-cloud-storage=2.32.0 +- libgoogle-cloud=2.33.0 +- libgoogle-cloud-storage=2.33.0 - libgrpc=1.67.1 +- libhwloc=2.11.2 - libiconv=1.17 - libintl=0.22.5 - libjpeg-turbo=3.0.0 @@ -257,16 +261,15 @@ dependencies: - libntlm=1.8 - libopenblas=0.3.28 - libparquet=18.1.0 -- libpng=1.6.44 +- libpng=1.6.45 - libpq=17.2 -- libprotobuf=5.28.2 +- libprotobuf=5.28.3 - libre2-11=2024.07.02 - librsvg=2.58.4 - librttopo=1.1.0 -- libscotch=7.0.5 +- libscotch=7.0.6 - libsodium=1.0.20 - libsolv=0.7.30 -- libspatialindex=2.1.0 - libspatialite=5.1.0 - libsqlite=3.47.2 - libssh2=1.11.1 @@ -281,7 +284,7 @@ dependencies: - libzlib=1.3.1 - 
linkify-it-py=2.0.3 - linopy=0.3.11 -- llvm-openmp=19.1.6 +- llvm-openmp=19.1.7 - llvmlite=0.43.0 - locket=1.0.0 - lxml=5.3.0 @@ -302,7 +305,8 @@ dependencies: - mercantile=1.2.1 - metis=5.1.0 - minizip=4.0.7 -- mistune=3.0.2 +- mistune=3.1.0 +- mpfr=4.2.1 - msgpack-python=1.1.0 - multipledispatch=0.6.0 - multiurl=0.3.3 @@ -311,7 +315,7 @@ dependencies: - mumps-seq=5.7.3 - munkres=1.1.4 - nbclient=0.10.2 -- nbconvert-core=7.16.4 +- nbconvert-core=7.16.5 - nbformat=5.10.4 - ncurses=6.5 - nest-asyncio=1.6.0 @@ -336,7 +340,7 @@ dependencies: - pandas=2.2.2 - pandocfilters=1.5.0 - panel=1.5.5 -- pango=1.54.0 +- pango=1.56.0 - param=2.2.0 - parso=0.8.4 - partd=1.4.2 @@ -344,7 +348,7 @@ dependencies: - pcre2=10.44 - pexpect=4.9.0 - pickleshare=0.7.5 -- pillow=11.0.0 +- pillow=11.1.0 - pip=24.3.1 - pixman=0.44.2 - pkgutil-resolve-name=1.3.10 @@ -356,14 +360,14 @@ dependencies: - poppler=24.12.0 - poppler-data=0.4.12 - postgresql=17.2 -- powerplantmatching=0.6.0 +- powerplantmatching=0.6.1 - pre-commit=4.0.1 - progressbar2=4.5.0 - proj=9.5.1 - prometheus_client=0.21.1 - prompt-toolkit=3.0.48 -- protobuf=5.28.2 -- psutil=6.1.0 +- protobuf=5.28.3 +- psutil=6.1.1 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 @@ -378,15 +382,16 @@ dependencies: - pycryptodomex=3.21.0 - pyct=0.5.0 - pydoe2=1.3.0 -- pygments=2.18.0 -- pyobjc-core=10.3.2 -- pyobjc-framework-cocoa=10.3.2 +- pygments=2.19.1 +- pyobjc-core=11.0 +- pyobjc-framework-cocoa=11.0 - pyogrio=0.10.0 -- pyomo=6.8.2 -- pyparsing=3.2.0 +- pyomo=6.6.1 +- pyparsing=3.2.1 - pyppmd=1.1.0 - pyproj=3.7.0 -- pypsa=0.24.0 +- pypsa=0.28.0 +- pyscipopt=5.2.1 - pyshp=2.3.1 - pysocks=1.7.1 - pytables=3.10.1 @@ -399,7 +404,7 @@ dependencies: - python-utils=3.9.1 - python_abi=3.10 - pytz=2024.2 -- pyviz_comms=3.0.3 +- pyviz_comms=3.0.4 - pyyaml=6.0.2 - pyzmq=26.2.0 - pyzstd=0.16.2 @@ -416,17 +421,17 @@ dependencies: - rfc3986-validator=0.1.1 - rioxarray=0.17.0 - rpds-py=0.22.3 -- rtree=1.3.0 - ruamel.yaml=0.17.26 - ruamel.yaml.clib=0.2.8 -- scikit-learn=1.6.0 -- scipy=1.14.1 +- scikit-learn=1.6.1 +- scip=9.2.0 +- scipy=1.15.1 - seaborn=0.13.2 - seaborn-base=0.13.2 - send2trash=1.8.3 -- setuptools=75.6.0 +- setuptools=75.8.0 - shapely=2.0.6 -- simdjson=3.11.3 +- simdjson=3.11.6 - six=1.17.0 - smart_open=7.1.0 - smmap=5.0.0 @@ -436,12 +441,13 @@ dependencies: - snuggs=1.4.7 - sortedcontainers=2.4.0 - soupsieve=2.5 -- spdlog=1.14.1 +- spdlog=1.15.0 - sqlite=3.47.2 - stack_data=0.6.3 - statsmodels=0.14.4 - stopit=1.1.2 - tabulate=0.9.0 +- tbb=2022.0.0 - tblib=3.0.0 - terminado=0.18.1 - texttable=1.7.0 @@ -456,6 +462,7 @@ dependencies: - tornado=6.4.2 - tqdm=4.67.1 - traitlets=5.14.3 +- tsam=2.3.6 - types-python-dateutil=2.9.0.20241206 - typing-extensions=4.12.2 - typing_extensions=4.12.2 @@ -464,19 +471,19 @@ dependencies: - tzdata=2024b - uc-micro-py=1.0.3 - ukkonen=1.0.1 -- unicodedata2=15.1.0 +- unicodedata2=16.0.0 - unidecode=1.3.8 - uri-template=1.3.0 - uriparser=0.9.8 - urllib3=2.3.0 - validators=0.34.0 -- virtualenv=20.28.0 +- virtualenv=20.29.0 - wcwidth=0.2.13 - webcolors=24.11.1 - webencodings=0.5.1 - websocket-client=1.8.0 - wheel=0.45.1 -- wrapt=1.17.0 +- wrapt=1.17.2 - xarray=2023.11.0 - xerces-c=3.2.5 - xlrd=2.0.1 @@ -491,13 +498,11 @@ dependencies: - zipfile-deflate64=0.2.0 - zipp=3.21.0 - zlib=1.3.1 -- zlib-ng=2.2.2 +- zlib-ng=2.2.3 - zstandard=0.23.0 - zstd=1.5.6 - pip: - chaospy==4.3.17 - fake-useragent==2.0.3 - googledrivedownloader==0.4 - - highspy==1.9.0 - - tsam==2.3.6 prefix: /Users/runner/miniconda3/envs/pypsa-earth diff --git 
a/envs/windows-pinned.yaml b/envs/windows-pinned.yaml index 4ac607945..deaf33f2b 100644 --- a/envs/windows-pinned.yaml +++ b/envs/windows-pinned.yaml @@ -16,7 +16,7 @@ dependencies: - affine=2.4.0 - ampl-asl=1.0.0 - amply=0.1.6 -- anyio=4.7.0 +- anyio=4.8.0 - appdirs=1.4.4 - argon2-cffi=23.1.0 - argon2-cffi-bindings=21.2.0 @@ -34,9 +34,9 @@ dependencies: - aws-c-io=0.15.3 - aws-c-mqtt=0.11.0 - aws-c-s3=0.7.7 -- aws-c-sdkutils=0.2.1 +- aws-c-sdkutils=0.2.2 - aws-checksums=0.2.2 -- aws-crt-cpp=0.29.7 +- aws-crt-cpp=0.29.8 - aws-sdk-cpp=1.11.458 - azure-core-cpp=1.14.0 - azure-identity-cpp=1.10.0 @@ -45,6 +45,7 @@ dependencies: - babel=2.16.0 - beautifulsoup4=4.12.3 - bleach=6.2.0 +- bleach-with-css=6.2.0 - blosc=1.21.6 - bokeh=3.6.2 - bottleneck=1.4.2 @@ -66,13 +67,13 @@ dependencies: - certifi=2024.12.14 - cffi=1.17.1 - cfgv=3.3.1 -- cfitsio=4.4.1 +- cfitsio=4.5.0 - cftime=1.6.4 -- charset-normalizer=3.4.0 +- charset-normalizer=3.4.1 - click=8.1.8 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=3.1.0 +- cloudpickle=3.1.1 - colorama=0.4.6 - colorcet=3.1.0 - comm=0.2.2 @@ -82,13 +83,14 @@ dependencies: - contourpy=1.3.1 - country_converter=1.2 - cpp-expected=1.1.0 +- cppad=20240000.7 - cpython=3.10.16 - cycler=0.12.1 - cytoolz=1.0.1 - dask=2024.12.1 - dask-core=2024.12.1 - dask-expr=1.1.21 -- datapi=0.1.1 +- datapi=0.1.2 - datashader=0.16.3 - datrie=0.8.2 - debugpy=1.8.11 @@ -107,9 +109,9 @@ dependencies: - exceptiongroup=1.2.2 - executing=2.1.0 - filelock=3.16.1 -- fiona=1.9.6 +- fiona=1.10.1 - fmt=11.0.2 -- folium=0.19.2 +- folium=0.19.4 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -127,19 +129,20 @@ dependencies: - gdal=3.9.3 - geographiclib=2.0 - geojson-rewind=1.1.0 -- geopandas=0.14.3 -- geopandas-base=0.14.3 +- geopandas=1.0.1 +- geopandas-base=1.0.1 - geopy=2.4.1 - geos=3.13.0 - geotiff=1.7.3 - geoviews=1.14.0 - geoviews-core=1.14.0 - getopt-win32=0.1 -- gitdb=4.0.11 -- gitpython=3.1.43 +- gitdb=4.0.12 +- gitpython=3.1.44 - glib=2.82.2 - glib-tools=2.82.2 - glpk=5.0 +- gmp=6.3.0 - graphite2=1.3.13 - graphviz=12.0.0 - gst-plugins-base=1.24.7 @@ -148,9 +151,10 @@ dependencies: - gurobi=12.0.0 - h11=0.14.0 - h2=4.1.0 -- harfbuzz=10.1.0 +- harfbuzz=10.2.0 - hdf4=4.2.15 - hdf5=1.14.3 +- highspy=1.9.0 - holoviews=1.20.0 - hpack=4.0.0 - httpcore=1.0.7 @@ -159,11 +163,11 @@ dependencies: - hvplot=0.11.2 - hyperframe=6.0.1 - icu=75.1 -- identify=2.6.3 +- identify=2.6.5 - idna=3.10 - importlib-metadata=8.5.0 - importlib_metadata=8.5.0 -- importlib_resources=6.4.5 +- importlib_resources=6.5.2 - inflate64=1.0.1 - iniconfig=2.0.0 - ipopt=3.14.17 @@ -187,7 +191,7 @@ dependencies: - jupyterlab=4.3.4 - jupyterlab_pygments=0.3.0 - jupyterlab_server=2.27.3 -- kealib=1.6.0 +- kealib=1.6.1 - kiwisolver=1.4.7 - krb5=1.21.3 - lcms2=2.16 @@ -200,11 +204,12 @@ dependencies: - libarrow-dataset=18.1.0 - libarrow-substrait=18.1.0 - libblas=3.9.0 +- libboost=1.86.0 - libbrotlicommon=1.1.0 - libbrotlidec=1.1.0 - libbrotlienc=1.1.0 - libcblas=3.9.0 -- libclang13=19.1.6 +- libclang13=19.1.7 - libcrc32c=1.1.2 - libcurl=8.11.1 - libdeflate=1.23 @@ -230,9 +235,10 @@ dependencies: - libgdal-xls=3.9.3 - libglib=2.82.2 - libgomp=14.2.0 -- libgoogle-cloud=2.32.0 -- libgoogle-cloud-storage=2.32.0 +- libgoogle-cloud=2.33.0 +- libgoogle-cloud-storage=2.33.0 - libgrpc=1.67.1 +- libhwloc=2.11.2 - libiconv=1.17 - libintl=0.22.5 - libintl-devel=0.22.5 @@ -245,14 +251,13 @@ dependencies: - libogg=1.3.5 - libopenblas=0.3.28 - libparquet=18.1.0 -- libpng=1.6.44 +- 
libpng=1.6.45 - libpq=17.2 -- libprotobuf=5.28.2 +- libprotobuf=5.28.3 - libre2-11=2024.07.02 - librttopo=1.1.0 - libsodium=1.0.20 - libsolv=0.7.30 -- libspatialindex=2.1.0 - libspatialite=5.1.0 - libsqlite=3.47.2 - libssh2=1.11.1 @@ -289,7 +294,8 @@ dependencies: - memory_profiler=0.61.0 - mercantile=1.2.1 - minizip=4.0.7 -- mistune=3.0.2 +- mistune=3.1.0 +- mpfr=4.2.1 - msgpack-python=1.1.0 - multipledispatch=0.6.0 - multiurl=0.3.3 @@ -297,7 +303,7 @@ dependencies: - mumps-seq=5.7.3 - munkres=1.1.4 - nbclient=0.10.2 -- nbconvert-core=7.16.4 +- nbconvert-core=7.16.5 - nbformat=5.10.4 - nest-asyncio=1.6.0 - netcdf4=1.7.2 @@ -320,14 +326,14 @@ dependencies: - pandas=2.2.2 - pandocfilters=1.5.0 - panel=1.5.5 -- pango=1.54.0 +- pango=1.56.0 - param=2.2.0 - parso=0.8.4 - partd=1.4.2 - patsy=1.0.1 - pcre2=10.44 - pickleshare=0.7.5 -- pillow=11.0.0 +- pillow=11.1.0 - pip=24.3.1 - pixman=0.44.2 - pkgutil-resolve-name=1.3.10 @@ -338,14 +344,14 @@ dependencies: - poppler=24.12.0 - poppler-data=0.4.12 - postgresql=17.2 -- powerplantmatching=0.6.0 +- powerplantmatching=0.6.1 - pre-commit=4.0.1 - progressbar2=4.5.0 - proj=9.5.1 - prometheus_client=0.21.1 - prompt-toolkit=3.0.48 -- protobuf=5.28.2 -- psutil=6.1.0 +- protobuf=5.28.3 +- psutil=6.1.1 - pthread-stubs=0.4 - pulp=2.7.0 - pure_eval=0.2.3 @@ -359,16 +365,17 @@ dependencies: - pycryptodomex=3.21.0 - pyct=0.5.0 - pydoe2=1.3.0 -- pygments=2.18.0 +- pygments=2.19.1 - pyogrio=0.10.0 -- pyomo=6.8.2 -- pyparsing=3.2.0 +- pyomo=6.6.1 +- pyparsing=3.2.1 - pyppmd=1.1.0 - pyproj=3.7.0 -- pypsa=0.24.0 +- pypsa=0.28.0 - pyqt=5.15.9 - pyqt5-sip=12.12.2 - pyreadline3=3.5.4 +- pyscipopt=5.2.1 - pyshp=2.3.1 - pysocks=1.7.1 - pytables=3.10.1 @@ -381,7 +388,7 @@ dependencies: - python-utils=3.9.1 - python_abi=3.10 - pytz=2024.2 -- pyviz_comms=3.0.3 +- pyviz_comms=3.0.4 - pywin32=307 - pywinpty=2.0.14 - pyyaml=6.0.2 @@ -400,17 +407,17 @@ dependencies: - rfc3986-validator=0.1.1 - rioxarray=0.17.0 - rpds-py=0.22.3 -- rtree=1.3.0 - ruamel.yaml=0.17.26 - ruamel.yaml.clib=0.2.8 -- scikit-learn=1.6.0 -- scipy=1.14.1 +- scikit-learn=1.6.1 +- scip=9.2.0 +- scipy=1.15.1 - seaborn=0.13.2 - seaborn-base=0.13.2 - send2trash=1.8.3 -- setuptools=75.6.0 +- setuptools=75.8.0 - shapely=2.0.6 -- simdjson=3.11.3 +- simdjson=3.11.6 - sip=6.7.12 - six=1.17.0 - smart_open=7.1.0 @@ -421,12 +428,13 @@ dependencies: - snuggs=1.4.7 - sortedcontainers=2.4.0 - soupsieve=2.5 -- spdlog=1.14.1 +- spdlog=1.15.0 - sqlite=3.47.2 - stack_data=0.6.3 - statsmodels=0.14.4 - stopit=1.1.2 - tabulate=0.9.0 +- tbb=2022.0.0 - tblib=3.0.0 - terminado=0.18.1 - texttable=1.7.0 @@ -442,6 +450,7 @@ dependencies: - tornado=6.4.2 - tqdm=4.67.1 - traitlets=5.14.3 +- tsam=2.3.6 - types-python-dateutil=2.9.0.20241206 - typing-extensions=4.12.2 - typing_extensions=4.12.2 @@ -450,7 +459,7 @@ dependencies: - uc-micro-py=1.0.3 - ucrt=10.0.22621.0 - ukkonen=1.0.1 -- unicodedata2=15.1.0 +- unicodedata2=16.0.0 - unidecode=1.3.8 - uri-template=1.3.0 - uriparser=0.9.8 @@ -458,7 +467,7 @@ dependencies: - validators=0.34.0 - vc=14.3 - vc14_runtime=14.42.34433 -- virtualenv=20.28.0 +- virtualenv=20.29.0 - vs2015_runtime=14.42.34433 - wcwidth=0.2.13 - webcolors=24.11.1 @@ -467,7 +476,7 @@ dependencies: - wheel=0.45.1 - win_inet_pton=1.1.0 - winpty=0.4.3 -- wrapt=1.17.0 +- wrapt=1.17.2 - xarray=2023.11.0 - xerces-c=3.2.5 - xlrd=2.0.1 @@ -488,14 +497,12 @@ dependencies: - zipfile-deflate64=0.2.0 - zipp=3.21.0 - zlib=1.3.1 -- zlib-ng=2.2.2 +- zlib-ng=2.2.3 - zstandard=0.23.0 - zstd=1.5.6 - pip: - chaospy==4.3.17 - 
fake-useragent==2.0.3 - googledrivedownloader==0.4 - - highspy==1.9.0 - polars==1.17.1 - - tsam==2.3.6 prefix: C:\Miniconda\envs\pypsa-earth diff --git a/scripts/_helpers.py b/scripts/_helpers.py index ddb6fda5f..33d5c7d78 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -60,7 +60,8 @@ def check_config_version(config, fp_config=CONFIG_DEFAULT_PATH): f"The current version of 'config.yaml' doesn't match to the code version:\n\r" f" {current_config_version} provided, {actual_config_version} expected.\n\r" f"That can lead to weird errors during execution of the workflow.\n\r" - f"Please update 'config.yaml' according to 'config.default.yaml' ." + f"Please update 'config.yaml' according to 'config.default.yaml.'\n\r" + "If issues persist, consider to update the environment to the latest version." ) @@ -921,6 +922,16 @@ def get_last_commit_message(path): return last_commit_message +def update_config_dictionary( + config_dict, + parameter_key_to_fill="lines", + dict_to_use={"geometry": "first", "bounds": "first"}, +): + config_dict.setdefault(parameter_key_to_fill, {}) + config_dict[parameter_key_to_fill].update(dict_to_use) + return config_dict + + # PYPSA-EARTH-SEC def annuity(n, r): """ @@ -1273,7 +1284,7 @@ def get_GADM_layer(country_list, layer_id, update=False, outlogging=False): def locate_bus( coords, co, - gadm_level, + gadm_layer_id, path_to_gadm=None, gadm_clustering=False, col="name", @@ -1305,8 +1316,8 @@ def locate_bus( lambda name: three_2_two_digits_country(name[:3]) + name[3:] ) else: - gdf = get_GADM_layer(co, gadm_level) - col = "GID_{}".format(gadm_level) + gdf = get_GADM_layer(co, gadm_layer_id) + col = "GID_{}".format(gadm_layer_id) # gdf.set_index("GADM_ID", inplace=True) gdf_co = gdf[ @@ -1483,3 +1494,91 @@ def safe_divide(numerator, denominator, default_value=np.nan): f"Division by zero: {numerator} / {denominator}, returning NaN." ) return np.nan + + +def lossy_bidirectional_links(n, carrier): + """ + Split bidirectional links of type carrier into two unidirectional links to include transmission losses. + """ + + # identify all links of type carrier + carrier_i = n.links.query("carrier == @carrier").index + + if carrier_i.empty: + return + + logger.info(f"Splitting bidirectional links with the carrier {carrier}") + + # set original links to be unidirectional + n.links.loc[carrier_i, "p_min_pu"] = 0 + + # add a new links that mirror the original links, but represent the reversed flow direction + # the new links have a cost and length of 0 to not distort the overall cost and network length + rev_links = ( + n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) + ) + rev_links["length_original"] = rev_links[ + "length" + ] # tracker for the length of the original links length + rev_links["capital_cost"] = 0 + rev_links["length"] = 0 + rev_links["reversed"] = True # tracker for easy identification of reversed links + rev_links.index = rev_links.index.map(lambda x: x + "-reversed") + + # add the new reversed links to the network and fill the newly created trackers with default values for the other links + n.links = pd.concat([n.links, rev_links], sort=False) + n.links["reversed"] = n.links["reversed"].fillna(False).infer_objects(copy=False) + n.links["length_original"] = n.links["length_original"].fillna(n.links.length) + + +def set_length_based_efficiency(n, carrier, bus_suffix, transmission_efficiency): + """ + Set the efficiency of all links of type carrier in network n based on their length and the values specified in the config. 
+ Additionally add the length based electricity demand required for compression (if applicable). + The bus_suffix refers to the suffix that differentiates the links' bus0 from the corresponding electricity bus, i.e. " H2". + Important: + Call this function AFTER lossy_bidirectional_links when creating links that are both bidirectional and lossy, + and have a length based electricity demand for compression. Otherwise the compression will not consistently take place at + the inflow bus and instead vary between the inflow and the outflow bus. + """ + + # get the links' length based efficiency and required compression + if carrier not in transmission_efficiency: + raise KeyError( + f"An error occurred when setting the length based efficiency for the Links of type {carrier}. " + f"The Link type {carrier} was not found in the config under config['sector']['transmission_efficiency']." + ) + efficiencies = transmission_efficiency[carrier] + efficiency_static = efficiencies.get("efficiency_static", 1) + efficiency_per_1000km = efficiencies.get("efficiency_per_1000km", 1) + compression_per_1000km = efficiencies.get("compression_per_1000km", 0) + + # identify all links of type carrier + carrier_i = n.links.loc[n.links.carrier == carrier].index + + # identify the lengths of all links of type carrier + # use "length_original" for lossy bidirectional links and "length" for any other link + if ("reversed" in n.links.columns) and any(n.links.loc[carrier_i, "reversed"]): + lengths = n.links.loc[carrier_i, "length_original"] + else: + lengths = n.links.loc[carrier_i, "length"] + + # set the links' length based efficiency + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static * efficiency_per_1000km ** (lengths / 1e3) + ) + + # set the links' electricity demand for compression + if compression_per_1000km > 0: + # connect the links to their corresponding electricity buses + n.links.loc[carrier_i, "bus2"] = n.links.loc[ + carrier_i, "bus0" + ].str.removesuffix(bus_suffix) + # TODO: use these lines to set bus 2 instead, once n.buses.location is functional and remove bus_suffix. 
+ """ + n.links.loc[carrier_i, "bus2"] = n.links.loc[carrier_i, "bus0"].map( + n.buses.location + ) # electricity + """ + # set the required compression demand + n.links.loc[carrier_i, "efficiency2"] = -compression_per_1000km * lengths / 1e3 diff --git a/scripts/add_export.py b/scripts/add_export.py index ba7b2442e..d8a860b82 100644 --- a/scripts/add_export.py +++ b/scripts/add_export.py @@ -47,13 +47,13 @@ def select_ports(n): logger.error( "No export ports chosen, please add ports to the file data/export_ports.csv" ) - gadm_level = snakemake.params.gadm_level + gadm_layer_id = snakemake.params.gadm_layer_id - ports["gadm_{}".format(gadm_level)] = ports[["x", "y", "country"]].apply( + ports["gadm_{}".format(gadm_layer_id)] = ports[["x", "y", "country"]].apply( lambda port: locate_bus( port[["x", "y"]], port["country"], - gadm_level, + gadm_layer_id, snakemake.input["shapes_path"], snakemake.params.alternative_clustering, ), @@ -62,7 +62,7 @@ def select_ports(n): # TODO: revise if ports quantity and property by shape become relevant # drop duplicated entries - gcol = "gadm_{}".format(gadm_level) + gcol = "gadm_{}".format(gadm_layer_id) ports_sel = ports.loc[~ports[gcol].duplicated(keep="first")].set_index(gcol) # Select the hydrogen buses based on nodes with ports diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 2c317c305..34b6c6a6a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -57,7 +57,13 @@ import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging, create_logger +from _helpers import ( + configure_logging, + create_logger, + lossy_bidirectional_links, + override_component_attrs, + set_length_based_efficiency, +) from add_electricity import ( _add_missing_carriers_from_costs, add_nice_carrier_names, @@ -225,7 +231,7 @@ def attach_stores(n, costs, config): ) -def attach_hydrogen_pipelines(n, costs, config): +def attach_hydrogen_pipelines(n, costs, config, transmission_efficiency): elec_opts = config["electricity"] ext_carriers = elec_opts["extendable_carriers"] as_stores = ext_carriers.get("Store", []) @@ -261,10 +267,15 @@ def attach_hydrogen_pipelines(n, costs, config): p_nom_extendable=True, length=h2_links.length.values, capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length, - efficiency=costs.at["H2 pipeline", "efficiency"], carrier="H2 pipeline", ) + # split the pipeline into two unidirectional links to properly apply transmission losses in both directions. 
+ lossy_bidirectional_links(n, "H2 pipeline") + + # set the pipelines efficiency and the electricity required by the pipeline for compression + set_length_based_efficiency(n, "H2 pipeline", " H2", transmission_efficiency) + if __name__ == "__main__": if "snakemake" not in globals(): @@ -274,8 +285,10 @@ def attach_hydrogen_pipelines(n, costs, config): configure_logging(snakemake) - n = pypsa.Network(snakemake.input.network) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) Nyears = n.snapshot_weightings.objective.sum() / 8760.0 + transmission_efficiency = snakemake.params.transmission_efficiency config = snakemake.config costs = load_costs( @@ -287,7 +300,7 @@ def attach_hydrogen_pipelines(n, costs, config): attach_storageunits(n, costs, config) attach_stores(n, costs, config) - attach_hydrogen_pipelines(n, costs, config) + attach_hydrogen_pipelines(n, costs, config, transmission_efficiency) add_nice_carrier_names(n, config=snakemake.config) diff --git a/scripts/base_network.py b/scripts/base_network.py index 65d640d44..e11ff83c6 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -523,20 +523,6 @@ def base_network( result_type="reduce", ) n.import_components_from_dataframe(lines_ac, "Line") - # The columns which names starts with "bus" are mixed up with the third-bus specification - # when executing additional_linkports() - lines_dc.drop( - labels=[ - "bus0_lon", - "bus0_lat", - "bus1_lon", - "bus1_lat", - "bus_0_coors", - "bus_1_coors", - ], - axis=1, - inplace=True, - ) n.import_components_from_dataframe(lines_dc, "Link") n.import_components_from_dataframe(transformers, "Transformer") diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py index e5b3cf3ca..f9f4775fb 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -141,7 +141,7 @@ def get_gadm_shape( # when duplicates, keep only the first entry join_geos = join_geos[~join_geos.index.duplicated()] - gadm_sel = gadm_shapes.loc[join_geos["index_right"].values] + gadm_sel = gadm_shapes.loc[join_geos[gadm_shapes.index.name].values] return gadm_sel.geometry.values, gadm_sel.index.values diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index 48d7ed8c6..02a650150 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -21,7 +21,7 @@ gpd_version = StrictVersion(gpd.__version__) -def map_industry_to_buses(df, countries, gadm_level, shapes_path, gadm_clustering): +def map_industry_to_buses(df, countries, gadm_layer_id, shapes_path, gadm_clustering): """ Load hotmaps database of industrial sites and map onto bus regions. Build industrial demand... Change name and add other functions. @@ -31,18 +31,18 @@ def map_industry_to_buses(df, countries, gadm_level, shapes_path, gadm_clusterin Change hotmaps to more descriptive name, etc. 
""" df = df[df.country.isin(countries)] - df["gadm_{}".format(gadm_level)] = df[["x", "y", "country"]].apply( + df["gadm_{}".format(gadm_layer_id)] = df[["x", "y", "country"]].apply( lambda site: locate_bus( site[["x", "y"]].astype("float"), site["country"], - gadm_level, + gadm_layer_id, shapes_path, gadm_clustering, ), axis=1, ) - return df.set_index("gadm_" + str(gadm_level)) + return df.set_index("gadm_" + str(gadm_layer_id)) def build_nodal_distribution_key( @@ -131,7 +131,7 @@ def match_technology(df): regions = gpd.read_file(snakemake.input.regions_onshore) shapes_path = snakemake.input.shapes_path - gadm_level = snakemake.params.gadm_level + gadm_layer_id = snakemake.params.gadm_layer_id countries = snakemake.params.countries gadm_clustering = snakemake.params.alternative_clustering @@ -178,7 +178,7 @@ def match_technology(df): industrial_database = map_industry_to_buses( geo_locs[geo_locs.quality != "unavailable"], countries, - gadm_level, + gadm_layer_id, shapes_path, gadm_clustering, ) diff --git a/scripts/build_osm_network.py b/scripts/build_osm_network.py index b4e7e6f4c..de88be8e5 100644 --- a/scripts/build_osm_network.py +++ b/scripts/build_osm_network.py @@ -26,6 +26,27 @@ logger = create_logger(__name__) +# Keep only a predefined set of columns, as otherwise conflicts are possible +# e.g. the columns which names starts with "bus" are mixed up with +# the third-bus specification when executing additional_linkports() +LINES_COLUMNS = [ + "line_id", + "circuits", + "tag_type", + "voltage", + "bus0", + "bus1", + "length", + "underground", + "under_construction", + "tag_frequency", + "dc", + "country", + "geometry", + "bounds", +] + + def line_endings_to_bus_conversion(lines): # Assign to every line a start and end point @@ -554,21 +575,17 @@ def fix_overpassing_lines(lines, buses, distance_crs, tol=1): buffer_df = df_p.buffer(tol).to_frame() # Spatial join to find lines intersecting point buffers - joined = gpd.sjoin(df_l, buffer_df, how="inner", op="intersects") + joined = gpd.sjoin(df_l, buffer_df, how="inner", predicate="intersects") # group lines by their index group_lines = joined.groupby(level=0) # iterate over the groups, TODO: change to apply for i, group in group_lines: - line_id = df_l.loc[i, line_id_str] # pick the line id that represents the group line_geom = df_l.loc[i, "geometry"] - # number of points that intersect with the line - num_points = len(group) - # get the indices of the points that intersect with the line - points_indexes = group["index_right"].tolist() + points_indexes = group[buffer_df.index.name].tolist() # get the geometries of the points that intersect with the line all_points = df_p.loc[points_indexes, "geometry"] @@ -720,6 +737,7 @@ def built_network( countries_config, geo_crs, distance_crs, + lines_cols_standard, force_ac=False, ): logger.info("Stage 1/5: Read input data") @@ -784,6 +802,8 @@ def built_network( if not os.path.exists(outputs["lines"]): os.makedirs(os.path.dirname(outputs["lines"]), exist_ok=True) + lines = lines[lines_cols_standard] + to_csv_nafix(lines, outputs["lines"]) # Generate CSV to_csv_nafix(converters, outputs["converters"]) # Generate CSV to_csv_nafix(transformers, outputs["transformers"]) # Generate CSV @@ -819,5 +839,6 @@ def built_network( countries, geo_crs, distance_crs, + lines_cols_standard=LINES_COLUMNS, force_ac=force_ac, ) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 3e0e8bd5a..a4247e64b 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -22,6 +22,7 @@ BASE_DIR, 
configure_logging, create_logger, + save_to_geojson, three_2_two_digits_country, two_2_three_digits_country, two_digits_2_name_country, @@ -305,23 +306,6 @@ def country_cover(country_shapes, eez_shapes=None, out_logging=False, distance=0 return africa_shape -def save_to_geojson(df, fn): - if os.path.exists(fn): - os.unlink(fn) # remove file if it exists - if not isinstance(df, gpd.GeoDataFrame): - df = gpd.GeoDataFrame(dict(geometry=df)) - - # save file if the GeoDataFrame is non-empty - if df.shape[0] > 0: - df = df.reset_index() - schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"} - df.to_file(fn, driver="GeoJSON", schema=schema) - else: - # create empty file to avoid issues with snakemake - with open(fn, "w") as fp: - pass - - def load_EEZ(countries_codes, geo_crs, EEZ_gpkg="./data/eez/eez_v11.gpkg"): """ Function to load the database of the Exclusive Economic Zones. diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 63ae6556c..562b9cfa7 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -134,6 +134,7 @@ configure_logging, create_logger, get_aggregation_strategies, + update_config_dictionary, update_p_nom_max, ) from add_electricity import load_costs @@ -378,7 +379,7 @@ def n_bounds(model, *n_id): ) -def busmap_for_gadm_clusters(inputs, n, gadm_level, geo_crs, country_list): +def busmap_for_gadm_clusters(inputs, n, gadm_layer_id, geo_crs, country_list): gdf = gpd.read_file(inputs.gadm_shapes) def locate_bus(coords, co): @@ -394,12 +395,12 @@ def locate_bus(coords, co): ]["GADM_ID"].item() buses = n.buses - buses["gadm_{}".format(gadm_level)] = buses[["x", "y", "country"]].apply( + buses["gadm_{}".format(gadm_layer_id)] = buses[["x", "y", "country"]].apply( lambda bus: locate_bus(bus[["x", "y"]], bus["country"]), axis=1 ) buses["gadm_subnetwork"] = ( - buses["gadm_{}".format(gadm_level)] + "_" + buses["carrier"].astype(str) + buses["gadm_{}".format(gadm_layer_id)] + "_" + buses["carrier"].astype(str) ) busmap = buses["gadm_subnetwork"] @@ -575,9 +576,10 @@ def clustering_for_n_clusters( extended_link_costs=0, focus_weights=None, ): - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) if not isinstance(custom_busmap, pd.Series): if alternative_clustering: @@ -603,12 +605,14 @@ def clustering_for_n_clusters( clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=line_length_factor, + line_strategies=line_strategies, + bus_strategies=bus_strategies, generator_strategies=generator_strategies, + one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) @@ -630,14 +634,6 @@ def clustering_for_n_clusters( return clustering -def save_to_geojson(s, fn): - if os.path.exists(fn): - os.unlink(fn) - df = s.reset_index() - schema = {**gpd.io.file.infer_schema(df), "geometry": "Unknown"} - df.to_file(fn, driver="GeoJSON", schema=schema) - - def cluster_regions(busmaps, inputs, output): busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) @@ -703,9 +699,7 @@ def cluster_regions(busmaps, inputs, output): # Fast-path if no 
clustering is necessary busmap = n.buses.index.to_series() linemap = n.lines.index.to_series() - clustering = pypsa.clustering.spatial.Clustering( - n, busmap, linemap, linemap, pd.Series(dtype="O") - ) + clustering = pypsa.clustering.spatial.Clustering(n, busmap, linemap) elif len(n.buses) < n_clusters: logger.error( f"Desired number of clusters ({n_clusters}) higher than the number of buses ({len(n.buses)})" @@ -727,14 +721,27 @@ def consense(x): ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!" return v - aggregation_strategies = snakemake.params.cluster_options.get( - "aggregation_strategies", {} + aggregation_strategies = snakemake.params.aggregation_strategies + + # Aggregation strategies must be set for all columns + update_config_dictionary( + config_dict=aggregation_strategies, + parameter_key_to_fill="lines", + dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"}, ) - # translate str entries of aggregation_strategies to pd.Series functions: - aggregation_strategies = { - p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()} - for p in aggregation_strategies.keys() - } + update_config_dictionary( + config_dict=aggregation_strategies, + parameter_key_to_fill="buses", + dict_to_use={ + "v_nom": "first", + "lat": "mean", + "lon": "mean", + "tag_substation": "first", + "tag_area": "first", + "country": "first", + }, + ) + custom_busmap = False # snakemake.params.custom_busmap custom busmap is depreciated https://github.com/pypsa-meets-earth/pypsa-earth/pull/694 if custom_busmap: busmap = pd.read_csv( diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 880dafef7..cc1dc099b 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1022,23 +1022,23 @@ def add_aviation(n, cost): airports = pd.read_csv(snakemake.input.airports, keep_default_na=False) airports = airports[airports.country.isin(countries)] - gadm_level = options["gadm_level"] + gadm_layer_id = snakemake.config["build_shape_options"]["gadm_layer_id"] - airports["gadm_{}".format(gadm_level)] = airports[["x", "y", "country"]].apply( + airports["gadm_{}".format(gadm_layer_id)] = airports[["x", "y", "country"]].apply( lambda airport: locate_bus( airport[["x", "y"]], airport["country"], - gadm_level, + gadm_layer_id, snakemake.input.shapes_path, snakemake.config["cluster_options"]["alternative_clustering"], ), axis=1, ) # To change 3 country code to 2 - # airports["gadm_{}".format(gadm_level)] = airports["gadm_{}".format(gadm_level)].apply( + # airports["gadm_{}".format(gadm_layer_id)] = airports["gadm_{}".format(gadm_layer_id)].apply( # lambda cocode: three_2_two_digits_country(cocode[:3]) + " " + cocode[4:-2]) - airports = airports.set_index("gadm_{}".format(gadm_level)) + airports = airports.set_index("gadm_{}".format(gadm_layer_id)) ind = pd.DataFrame(n.buses.index[n.buses.carrier == "AC"]) @@ -1290,7 +1290,7 @@ def add_shipping(n, costs): ).squeeze() ports = ports[ports.country.isin(countries)] - gadm_level = options["gadm_level"] + gadm_layer_id = snakemake.config["build_shape_options"]["gadm_layer_id"] all_navigation = ["total international navigation", "total domestic navigation"] @@ -1307,18 +1307,18 @@ def add_shipping(n, costs): options["shipping_hydrogen_share"], demand_sc + "_" + str(investment_year) ) - ports["gadm_{}".format(gadm_level)] = ports[["x", "y", "country"]].apply( + ports["gadm_{}".format(gadm_layer_id)] = ports[["x", "y", 
"country"]].apply( lambda port: locate_bus( port[["x", "y"]], port["country"], - gadm_level, + gadm_layer_id, snakemake.input["shapes_path"], snakemake.config["cluster_options"]["alternative_clustering"], ), axis=1, ) - ports = ports.set_index("gadm_{}".format(gadm_level)) + ports = ports.set_index("gadm_{}".format(gadm_layer_id)) ind = pd.DataFrame(n.buses.index[n.buses.carrier == "AC"]) ind = ind.set_index(n.buses.index[n.buses.carrier == "AC"]) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 92c3dd340..502cf1b9d 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -96,13 +96,12 @@ from _helpers import ( configure_logging, create_logger, - get_aggregation_strategies, + update_config_dictionary, update_p_nom_max, ) from add_electricity import load_costs from cluster_network import cluster_regions, clustering_for_n_clusters from pypsa.clustering.spatial import ( - aggregategenerators, aggregateoneport, busmap_by_stubs, get_clustering_from_busmap, @@ -276,11 +275,15 @@ def replace_components(n, c, df, pnl): _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output) - _, generator_strategies = get_aggregation_strategies(aggregation_strategies) + generator_strategies = aggregation_strategies["generators"] carriers = set(n.generators.carrier) - set(exclude_carriers) - generators, generators_pnl = aggregategenerators( - n, busmap, carriers=carriers, custom_strategies=generator_strategies + generators, generators_pnl = aggregateoneport( + n, + busmap, + "Generator", + carriers=carriers, + custom_strategies=generator_strategies, ) replace_components(n, "Generator", generators, generators_pnl) @@ -588,19 +591,22 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): if not dist.empty: busmap.loc[buses_i] = dist.idxmin(1) - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=None, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, + line_strategies=line_strategies, + bus_strategies=bus_strategies, generator_strategies=generator_strategies, + one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) return clustering.network, busmap @@ -848,19 +854,22 @@ def merge_into_network(n, threshold, aggregation_strategies=dict()): if (busmap.index == busmap).all(): return n, n.buses.index.to_series() - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=None, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, + line_strategies=line_strategies, + bus_strategies=bus_strategies, generator_strategies=generator_strategies, + 
one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) @@ -934,19 +943,22 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()): if (busmap.index == busmap).all(): return n, n.buses.index.to_series() - bus_strategies, generator_strategies = get_aggregation_strategies( - aggregation_strategies - ) + line_strategies = aggregation_strategies.get("lines", dict()) + bus_strategies = aggregation_strategies.get("buses", dict()) + generator_strategies = aggregation_strategies.get("generators", dict()) + one_port_strategies = aggregation_strategies.get("one_ports", dict()) clustering = get_clustering_from_busmap( n, busmap, - bus_strategies=bus_strategies, aggregate_generators_weighted=True, aggregate_generators_carriers=None, aggregate_one_ports=["Load", "StorageUnit"], line_length_factor=1.0, + line_strategies=line_strategies, + bus_strategies=bus_strategies, generator_strategies=generator_strategies, + one_port_strategies=one_port_strategies, scale_link_capital_costs=False, ) @@ -976,14 +988,27 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()): "exclude_carriers", [] ) hvdc_as_lines = snakemake.params.electricity["hvdc_as_lines"] - aggregation_strategies = snakemake.params.cluster_options.get( - "aggregation_strategies", {} + aggregation_strategies = snakemake.params.aggregation_strategies + + # Aggregation strategies must be set for all columns + update_config_dictionary( + config_dict=aggregation_strategies, + parameter_key_to_fill="lines", + dict_to_use={"v_nom": "first", "geometry": "first", "bounds": "first"}, ) - # translate str entries of aggregation_strategies to pd.Series functions: - aggregation_strategies = { - p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()} - for p in aggregation_strategies.keys() - } + update_config_dictionary( + config_dict=aggregation_strategies, + parameter_key_to_fill="buses", + dict_to_use={ + "v_nom": "first", + "lat": "mean", + "lon": "mean", + "tag_substation": "first", + "tag_area": "first", + "country": "first", + }, + ) + n, trafo_map = simplify_network_to_base_voltage(n, linetype, base_voltage) Nyears = n.snapshot_weightings.objective.sum() / 8760 @@ -1088,7 +1113,7 @@ def merge_isolated_nodes(n, threshold, aggregation_strategies=dict()): solver_name, cluster_config.get("algorithm", "hac"), cluster_config.get("feature", None), - aggregation_strategies, + aggregation_strategies=aggregation_strategies, ) busmaps.append(cluster_map) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 88bdc6738..2541ae9b2 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -52,15 +52,15 @@ linear optimal power flow (plus investment planning) is provided in the `documentation of PyPSA `_. -The optimization is based on the ``pyomo=False`` setting in the :func:`network.lopf` and :func:`pypsa.linopf.ilopf` function. -Additionally, some extra constraints specified in :mod:`prepare_network` are added. +The optimization is based on the :func:`network.optimize` function. +Additionally, some extra constraints specified in :mod:`prepare_network` and :mod:`solve_network` are added. Solving the network in multiple iterations is motivated through the dependence of transmission line capacities and impedances on values of corresponding flows. As lines are expanded their electrical parameters change, which renders the optimisation bilinear even if the power flow equations are linearized. 
To retain the computational advantage of continuous linear programming, a sequential linear programming technique is used, where in between iterations the line impedances are updated. -Details (and errors made through this heuristic) are discussed in the paper +Details (and errors introduced through this heuristic) are discussed in the paper - Fabian Neumann and Tom Brown. `Heuristics for Transmission Expansion Planning in Low-Carbon Energy System Models `_), *16th International Conference on the European Energy Market*, 2019. `arXiv:1907.10548 `_. @@ -85,24 +85,18 @@ import numpy as np import pandas as pd import pypsa +import xarray as xr from _helpers import configure_logging, create_logger, override_component_attrs +from linopy import merge from pypsa.descriptors import get_switchable_as_dense as get_as_dense -from pypsa.linopf import ( - define_constraints, - define_variables, - get_var, - ilopf, - join_exprs, - linexpr, - network_lopf, -) -from pypsa.linopt import define_constraints, get_var, join_exprs, linexpr +from pypsa.optimization.abstract import optimize_transmission_expansion_iteratively +from pypsa.optimization.optimize import optimize logger = create_logger(__name__) pypsa.pf.logger.setLevel(logging.WARNING) -def prepare_network(n, solve_opts): +def prepare_network(n, solve_opts, config): if "clip_p_max_pu" in solve_opts: for df in ( n.generators_t.p_max_pu, @@ -159,6 +153,25 @@ def prepare_network(n, solve_opts): def add_CCL_constraints(n, config): + """ + Add CCL (country & carrier limit) constraint to the network. + + Add minimum and maximum levels of generator nominal capacity per carrier + for individual countries. Opts and path for agg_p_nom_minmax.csv must be defined + in config.yaml. Default file is available at data/agg_p_nom_minmax.csv. 
+ + Parameters + ---------- + n : pypsa.Network + config : dict + + Example + ------- + scenario: + opts: [Co2L-CCL-24H] + electricity: + agg_p_nom_limits: data/agg_p_nom_minmax.csv + """ agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits") try: @@ -174,32 +187,57 @@ def add_CCL_constraints(n, config): ) gen_country = n.generators.bus.map(n.buses.country) - # cc means country and carrier - p_nom_per_cc = ( - pd.DataFrame( - { - "p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))), - "country": gen_country, - "carrier": n.generators.carrier, - } + capacity_variable = n.model["Generator-p_nom"] + + lhs = [] + ext_carriers = n.generators.query("p_nom_extendable").carrier.unique() + for c in ext_carriers: + ext_carrier = n.generators.query("p_nom_extendable and carrier == @c") + country_grouper = ( + ext_carrier.bus.map(n.buses.country) + .rename_axis("Generator-ext") + .rename("country") ) - .dropna(subset=["p_nom"]) - .groupby(["country", "carrier"]) - .p_nom.apply(join_exprs) + ext_carrier_per_country = capacity_variable.loc[ + country_grouper.index + ].groupby_sum(country_grouper) + lhs.append(ext_carrier_per_country) + lhs = merge(lhs, dim=pd.Index(ext_carriers, name="carrier")) + + min_matrix = agg_p_nom_minmax["min"].to_xarray().unstack().reindex_like(lhs) + max_matrix = agg_p_nom_minmax["max"].to_xarray().unstack().reindex_like(lhs) + + n.model.add_constraints( + lhs >= min_matrix, name="agg_p_nom_min", mask=min_matrix.notnull() + ) + n.model.add_constraints( + lhs <= max_matrix, name="agg_p_nom_max", mask=max_matrix.notnull() ) - minimum = agg_p_nom_minmax["min"].dropna() - if not minimum.empty: - minconstraint = define_constraints( - n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min" - ) - maximum = agg_p_nom_minmax["max"].dropna() - if not maximum.empty: - maxconstraint = define_constraints( - n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max" - ) def add_EQ_constraints(n, o, scaling=1e-1): + """ + Add equity constraints to the network. + + Currently this is only implemented for the electricity sector only. + + Opts must be specified in the config.yaml. + + Parameters + ---------- + n : pypsa.Network + o : str + + Example + ------- + scenario: + opts: [Co2L-EQ0.7-24h] + + Require each country or node to on average produce a minimal share + of its total electricity consumption itself. Example: EQ0.7c demands each country + to produce on average at least 70% of its consumption; EQ0.7 demands + each node to produce on average at least 70% of its consumption. 
+ """ float_regex = "[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) if o[-1] == "c": @@ -220,99 +258,149 @@ def add_EQ_constraints(n, o, scaling=1e-1): ) inflow = inflow.reindex(load.index).fillna(0.0) rhs = scaling * (level * load - inflow) + dispatch_variable = n.model["Generator-p"] lhs_gen = ( - linexpr( - (n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T) - ) - .T.groupby(ggrouper, axis=1) - .apply(join_exprs) + (dispatch_variable * (n.snapshot_weightings.generators * scaling)) + .groupby(ggrouper.to_xarray()) + .sum() + .sum("snapshot") ) - lhs_spill = ( - linexpr( - ( - -n.snapshot_weightings.stores * scaling, - get_var(n, "StorageUnit", "spill").T, - ) + # the current formulation implies that the available hydro power is (inflow - spillage) + # it implies efficiency_dispatch is 1 which is not quite general + # see https://github.com/pypsa-meets-earth/pypsa-earth/issues/1245 for possible improvements + if not n.storage_units_t.inflow.empty: + spillage_variable = n.model["StorageUnit-spill"] + lhs_spill = ( + (spillage_variable * (-n.snapshot_weightings.stores * scaling)) + .groupby_sum(sgrouper) + .groupby(sgrouper.to_xarray()) + .sum() + .sum("snapshot") ) - .T.groupby(sgrouper, axis=1) - .apply(join_exprs) - ) - lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("") - lhs = lhs_gen + lhs_spill - define_constraints(n, lhs, ">=", rhs, "equity", "min") + lhs = lhs_gen + lhs_spill + else: + lhs = lhs_gen + n.model.add_constraints(lhs >= rhs, name="equity_min") def add_BAU_constraints(n, config): - ext_c = n.generators.query("p_nom_extendable").carrier.unique() - mincaps = pd.Series( - config["electricity"].get("BAU_mincapacities", {key: 0 for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps") - - maxcaps = pd.Series( - config["electricity"].get("BAU_maxcapacities", {key: np.inf for key in ext_c}) - ) - lhs = ( - linexpr((1, get_var(n, "Generator", "p_nom"))) - .groupby(n.generators.carrier) - .apply(join_exprs) - ) - define_constraints(n, lhs, "<=", maxcaps[lhs.index], "Carrier", "bau_maxcaps") + """ + Add a per-carrier minimal overall capacity. + + BAU_mincapacities and opts must be adjusted in the config.yaml. + + Parameters + ---------- + n : pypsa.Network + config : dict + + Example + ------- + scenario: + opts: [Co2L-BAU-24h] + electricity: + BAU_mincapacities: + solar: 0 + onwind: 0 + OCGT: 100000 + offwind-ac: 0 + offwind-dc: 0 + Which sets minimum expansion across all nodes e.g. in Europe to 100GW. + OCGT bus 1 + OCGT bus 2 + ... > 100000 + """ + mincaps = pd.Series(config["electricity"]["BAU_mincapacities"]) + p_nom = n.model["Generator-p_nom"] + ext_i = n.generators.query("p_nom_extendable") + ext_carrier_i = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext")) + lhs = p_nom.groupby(ext_carrier_i).sum() + rhs = mincaps[lhs.indexes["carrier"]].rename_axis("carrier") + n.model.add_constraints(lhs >= rhs, name="bau_mincaps") def add_SAFE_constraints(n, config): - peakdemand = ( - 1.0 + config["electricity"]["SAFE_reservemargin"] - ) * n.loads_t.p_set.sum(axis=1).max() - conv_techs = config["plotting"]["conv_techs"] + """ + Add a capacity reserve margin of a certain fraction above the peak demand. + Renewable generators and storage do not contribute. Ignores network. 
+ + Parameters + ---------- + n : pypsa.Network + config : dict + + Example + ------- + config.yaml requires to specify opts: + + scenario: + opts: [Co2L-SAFE-24h] + electricity: + SAFE_reservemargin: 0.1 + Which sets a reserve margin of 10% above the peak demand. + """ + peakdemand = n.loads_t.p_set.sum(axis=1).max() + margin = 1.0 + config["electricity"]["SAFE_reservemargin"] + reserve_margin = peakdemand * margin + conventional_carriers = config["electricity"]["conventional_carriers"] + ext_gens_i = n.generators.query( + "carrier in @conventional_carriers & p_nom_extendable" + ).index + capacity_variable = n.model["Generator-p_nom"] + p_nom = n.model["Generator-p_nom"].loc[ext_gens_i] + lhs = p_nom.sum() exist_conv_caps = n.generators.query( - "~p_nom_extendable & carrier in @conv_techs" + "~p_nom_extendable & carrier in @conventional_carriers" ).p_nom.sum() - ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index - lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum() - rhs = peakdemand - exist_conv_caps - define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap") + rhs = reserve_margin - exist_conv_caps + n.model.add_constraints(lhs >= rhs, name="safe_mintotalcap") -def add_operational_reserve_margin_constraint(n, config): +def add_operational_reserve_margin_constraint(n, sns, config): + """ + Build reserve margin constraints based on the formulation + as suggested in GenX + https://energy.mit.edu/wp-content/uploads/2017/10/Enhanced-Decision-Support-for-a-Changing-Electricity-Landscape.pdf + It implies that the reserve margin also accounts for optimal + dispatch of distributed energy resources (DERs) and demand response + which is a novel feature of GenX. + """ reserve_config = config["electricity"]["operational_reserve"] EPSILON_LOAD = reserve_config["epsilon_load"] EPSILON_VRES = reserve_config["epsilon_vres"] CONTINGENCY = reserve_config["contingency"] # Reserve Variables - reserve = get_var(n, "Generator", "r") - lhs = linexpr((1, reserve)).sum(1) + n.model.add_variables( + 0, np.inf, coords=[sns, n.generators.index], name="Generator-r" + ) + reserve = n.model["Generator-r"] + summed_reserve = reserve.sum("Generator") # Share of extendable renewable capacities ext_i = n.generators.query("p_nom_extendable").index vres_i = n.generators_t.p_max_pu.columns if not ext_i.empty and not vres_i.empty: capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)] - renewable_capacity_variables = get_var(n, "Generator", "p_nom")[ - vres_i.intersection(ext_i) - ] - lhs += linexpr( - (-EPSILON_VRES * capacity_factor, renewable_capacity_variables) - ).sum(1) + p_nom_vres = ( + n.model["Generator-p_nom"] + .loc[vres_i.intersection(ext_i)] + .rename({"Generator-ext": "Generator"}) + ) + lhs = summed_reserve + ( + p_nom_vres * (-EPSILON_VRES * xr.DataArray(capacity_factor)) + ).sum("Generator") - # Total demand at t - demand = n.loads_t.p.sum(1) + # Total demand per t + demand = get_as_dense(n, "Load", "p_set").sum(axis=1) # VRES potential of non extendable generators capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)] renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)] - potential = (capacity_factor * renewable_capacity).sum(1) + potential = (capacity_factor * renewable_capacity).sum(axis=1) # Right-hand-side rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY - define_constraints(n, lhs, ">=", rhs, "Reserve margin") + n.model.add_constraints(lhs >= rhs, name="reserve_margin") def 
update_capacity_constraint(n): @@ -320,171 +408,153 @@ def update_capacity_constraint(n): ext_i = n.generators.query("p_nom_extendable").index fix_i = n.generators.query("not p_nom_extendable").index - dispatch = get_var(n, "Generator", "p") - reserve = get_var(n, "Generator", "r") + dispatch = n.model["Generator-p"] + reserve = n.model["Generator-r"] capacity_fixed = n.generators.p_nom[fix_i] p_max_pu = get_as_dense(n, "Generator", "p_max_pu") - lhs = linexpr((1, dispatch), (1, reserve)) + lhs = dispatch + reserve + # TODO check if `p_max_pu[ext_i]` is safe for empty `ext_i` and drop if cause in case if not ext_i.empty: - capacity_variable = get_var(n, "Generator", "p_nom") - lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex( - columns=gen_i, fill_value="" + capacity_variable = n.model["Generator-p_nom"].rename( + {"Generator-ext": "Generator"} ) + lhs = dispatch + reserve - capacity_variable * xr.DataArray(p_max_pu[ext_i]) rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0) - define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint") + n.model.add_constraints(lhs <= rhs, name="gen_updated_capacity_constraint") def add_operational_reserve_margin(n, sns, config): """ - Build reserve margin constraints based on the formulation given in - https://genxproject.github.io/GenX/dev/core/#Reserves. + Parameters + ---------- + n : pypsa.Network + sns: pd.DatetimeIndex + config : dict + + Example: + -------- + config.yaml requires to specify operational_reserve: + operational_reserve: # like https://genxproject.github.io/GenX/dev/core/#Reserves + activate: true + epsilon_load: 0.02 # percentage of load at each snapshot + epsilon_vres: 0.02 # percentage of VRES at each snapshot + contingency: 400000 # MW """ - define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index]) - - add_operational_reserve_margin_constraint(n, config) + add_operational_reserve_margin_constraint(n, sns, config) update_capacity_constraint(n) def add_battery_constraints(n): - nodes = n.buses.index[n.buses.carrier == "battery"] - if nodes.empty or ("Link", "p_nom") not in n.variables.index: + """ + Add constraint ensuring that charger = discharger, i.e. 
+ 1 * charger_size - efficiency * discharger_size = 0 + """ + if not n.links.p_nom_extendable.any(): return - link_p_nom = get_var(n, "Link", "p_nom") - lhs = linexpr( - (1, link_p_nom[nodes + " charger"]), - ( - -n.links.loc[nodes + " discharger", "efficiency"].values, - link_p_nom[nodes + " discharger"].values, - ), + + discharger_bool = n.links.index.str.contains("battery discharger") + charger_bool = n.links.index.str.contains("battery charger") + + dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index + chargers_ext = n.links[charger_bool].query("p_nom_extendable").index + + eff = n.links.efficiency[dischargers_ext].values + lhs = ( + n.model["Link-p_nom"].loc[chargers_ext] + - n.model["Link-p_nom"].loc[dischargers_ext] * eff ) - define_constraints(n, lhs, "=", 0, "Link", "charger_ratio") + n.model.add_constraints(lhs == 0, name="Link-charger_ratio") -def add_RES_constraints(n, res_share): - lgrouper = n.loads.bus.map(n.buses.country) - ggrouper = n.generators.bus.map(n.buses.country) - sgrouper = n.storage_units.bus.map(n.buses.country) - cgrouper = n.links.bus0.map(n.buses.country) + +def add_RES_constraints(n, res_share, config): + """ + The constraint ensures that a predefined share of power is generated + by renewable sources + + Parameters + ---------- + n : pypsa.Network + res_share: float + config : dict + """ logger.warning( - "The add_RES_constraints functionality is still work in progress. " + "The add_RES_constraints() is still work in progress. " "Unexpected results might be incurred, particularly if " "temporal clustering is applied or if an unexpected change of technologies " - "is subject to the obtimisation." + "is subject to future improvements." ) + renew_techs = config["electricity"]["renewable_carriers"] + + charger = ["H2 electrolysis", "battery charger"] + discharger = ["H2 fuel cell", "battery discharger"] + + ren_gen = n.generators.query("carrier in @renew_techs") + ren_stores = n.storage_units.query("carrier in @renew_techs") + ren_charger = n.links.query("carrier in @charger") + ren_discharger = n.links.query("carrier in @discharger") + + gens_i = ren_gen.index + stores_i = ren_stores.index + charger_i = ren_charger.index + discharger_i = ren_discharger.index + + stores_t_weights = n.snapshot_weightings.stores + + lgrouper = n.loads.bus.map(n.buses.country) + ggrouper = ren_gen.bus.map(n.buses.country) + sgrouper = ren_stores.bus.map(n.buses.country) + cgrouper = ren_charger.bus0.map(n.buses.country) + dgrouper = ren_discharger.bus0.map(n.buses.country) + load = ( n.snapshot_weightings.generators @ n.loads_t.p_set.groupby(lgrouper, axis=1).sum() ) - rhs = res_share * load - res_techs = [ - "solar", - "onwind", - "offwind-dc", - "offwind-ac", - "battery", - "hydro", - "ror", - ] - charger = ["H2 electrolysis", "battery charger"] - discharger = ["H2 fuel cell", "battery discharger"] - - gens_i = n.generators.query("carrier in @res_techs").index - stores_i = n.storage_units.query("carrier in @res_techs").index - charger_i = n.links.query("carrier in @charger").index - discharger_i = n.links.query("carrier in @discharger").index - # Generators lhs_gen = ( - linexpr( - (n.snapshot_weightings.generators, get_var(n, "Generator", "p")[gens_i].T) - ) - .T.groupby(ggrouper, axis=1) - .apply(join_exprs) + (n.model["Generator-p"].loc[:, gens_i] * n.snapshot_weightings.generators) + .groupby(ggrouper.to_xarray()) + .sum() ) # StorageUnits - lhs_dispatch = ( - ( - linexpr( - ( - n.snapshot_weightings.stores, - get_var(n, "StorageUnit", 
"p_dispatch")[stores_i].T, - ) - ) - .T.groupby(sgrouper, axis=1) - .apply(join_exprs) - ) - .reindex(lhs_gen.index) - .fillna("") + store_disp_expr = ( + n.model["StorageUnit-p_dispatch"].loc[:, stores_i] * stores_t_weights ) - - lhs_store = ( - ( - linexpr( - ( - -n.snapshot_weightings.stores, - get_var(n, "StorageUnit", "p_store")[stores_i].T, - ) - ) - .T.groupby(sgrouper, axis=1) - .apply(join_exprs) - ) - .reindex(lhs_gen.index) - .fillna("") + store_expr = n.model["StorageUnit-p_store"].loc[:, stores_i] * stores_t_weights + charge_expr = n.model["Link-p"].loc[:, charger_i] * stores_t_weights.apply( + lambda r: r * n.links.loc[charger_i].efficiency ) + discharge_expr = n.model["Link-p"].loc[:, discharger_i] * stores_t_weights.apply( + lambda r: r * n.links.loc[discharger_i].efficiency + ) + + lhs_dispatch = store_disp_expr.groupby(sgrouper).sum() + lhs_store = store_expr.groupby(sgrouper).sum() # Stores (or their resp. Link components) # Note that the variables "p0" and "p1" currently do not exist. # Thus, p0 and p1 must be derived from "p" (which exists), taking into account the link efficiency. - lhs_charge = ( - ( - linexpr( - ( - -n.snapshot_weightings.stores, - get_var(n, "Link", "p")[charger_i].T, - ) - ) - .T.groupby(cgrouper, axis=1) - .apply(join_exprs) - ) - .reindex(lhs_gen.index) - .fillna("") - ) + lhs_charge = charge_expr.groupby(cgrouper).sum() - lhs_discharge = ( - ( - linexpr( - ( - n.snapshot_weightings.stores.apply( - lambda r: r * n.links.loc[discharger_i].efficiency - ), - get_var(n, "Link", "p")[discharger_i], - ) - ) - .groupby(cgrouper, axis=1) - .apply(join_exprs) - ) - .reindex(lhs_gen.index) - .fillna("") - ) + lhs_discharge = discharge_expr.groupby(cgrouper).sum() - # signs of resp. terms are coded in the linexpr. - # todo: for links (lhs_charge and lhs_discharge), account for snapshot weightings - lhs = lhs_gen + lhs_dispatch + lhs_store + lhs_charge + lhs_discharge + lhs = lhs_gen + lhs_dispatch - lhs_store - lhs_charge + lhs_discharge - define_constraints(n, lhs, "=", rhs, "RES share") + n.model.add_constraints(lhs == rhs, name="res_share") def add_land_use_constraint(n): @@ -548,16 +618,21 @@ def _add_land_use_constraint_m(n): def add_h2_network_cap(n, cap): h2_network = n.links.loc[n.links.carrier == "H2 pipeline"] - if h2_network.index.empty or ("Link", "p_nom") not in n.variables.index: + if h2_network.index.empty: return - h2_network_cap = get_var(n, "Link", "p_nom") - subset_index = h2_network.index.intersection(h2_network_cap.index) - lhs = linexpr( - (h2_network.loc[subset_index, "length"], h2_network_cap[subset_index]) + h2_network_cap = n.model["Link-p_nom"] + h2_network_cap_index = h2_network_cap.indexes["Link-ext"] + subset_index = h2_network.index.intersection(h2_network_cap_index) + diff_index = h2_network_cap_index.difference(subset_index) + if len(diff_index) > 0: + logger.warning( + f"Impossible to set a limit for H2 pipelines extension for the following links: {diff_index}" + ) + lhs = ( + h2_network_cap.loc[subset_index] * h2_network.loc[subset_index, "length"] ).sum() - # lhs = linexpr((1, h2_network_cap[h2_network.index])).sum() rhs = cap * 1000 - define_constraints(n, lhs, "<=", rhs, "h2_network_cap") + n.model.add_constraints(lhs <= rhs, name="h2_network_cap") def H2_export_yearly_constraint(n): @@ -578,9 +653,10 @@ def H2_export_yearly_constraint(n): index=n.snapshots, columns=res_index, ) - res = join_exprs( - linexpr((weightings, get_var(n, "Generator", "p")[res_index])) - ) # single line sum + capacity_variable = 
n.model["Generator-p"] + + # single line sum + res = (weightings * capacity_variable.loc[res_index]).sum() load_ind = n.loads[n.loads.carrier == "AC"].index.intersection( n.loads_t.p_set.columns @@ -608,7 +684,7 @@ def H2_export_yearly_constraint(n): else: rhs = h2_export * (1 / 0.7) - con = define_constraints(n, lhs, ">=", rhs, "H2ExportConstraint", "RESproduction") + n.model.add_constraints(lhs >= rhs, name="H2ExportConstraint-RESproduction") def monthly_constraints(n, n_ref): @@ -631,15 +707,17 @@ def monthly_constraints(n, n_ref): index=n.snapshots, columns=res_index, ) + capacity_variable = n.model["Generator-p"] - res = linexpr((weightings, get_var(n, "Generator", "p")[res_index])).sum( - axis=1 - ) # single line sum + # single line sum + res = (weightings * capacity_variable[res_index]).sum(axis=1) res = res.groupby(res.index.month).sum() - electrolysis = get_var(n, "Link", "p")[ + link_p = n.model["Link-p"] + electrolysis = link_p.loc[ n.links.index[n.links.index.str.contains("H2 Electrolysis")] ] + weightings_electrolysis = pd.DataFrame( np.outer( n.snapshot_weightings["generators"], [1.0] * len(electrolysis.columns) @@ -648,7 +726,7 @@ def monthly_constraints(n, n_ref): columns=electrolysis.columns, ) - elec_input = linexpr((-allowed_excess * weightings_electrolysis, electrolysis)).sum( + elec_input = ((-allowed_excess * weightings_electrolysis) * electrolysis).sum( axis=1 ) @@ -671,16 +749,16 @@ def monthly_constraints(n, n_ref): for i in range(len(res.index)): lhs = res.iloc[i] + "\n" + elec_input.iloc[i] rhs = res_ref.iloc[i] + elec_input_ref.iloc[i] - con = define_constraints( - n, lhs, ">=", rhs, f"RESconstraints_{i}", f"REStarget_{i}" + n.model.add_constraints( + lhs >= rhs, name=f"RESconstraints_{i}-REStarget_{i}" ) else: for i in range(len(res.index)): lhs = res.iloc[i] + "\n" + elec_input.iloc[i] - con = define_constraints( - n, lhs, ">=", 0.0, f"RESconstraints_{i}", f"REStarget_{i}" + n.model.add_constraints( + lhs >= 0.0, name=f"RESconstraints_{i}-REStarget_{i}" ) # else: # logger.info("ignoring H2 export constraint as wildcard is set to 0") @@ -701,84 +779,72 @@ def add_chp_constraints(n): electric = n.links.index[electric_bool] heat = n.links.index[heat_bool] - electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable] - heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable] + electric_ext = n.links[electric_bool].query("p_nom_extendable").index + heat_ext = n.links[heat_bool].query("p_nom_extendable").index - electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable] - heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable] + electric_fix = n.links[electric_bool].query("~p_nom_extendable").index + heat_fix = n.links[heat_bool].query("~p_nom_extendable").index - link_p = get_var(n, "Link", "p") + p = n.model["Link-p"] # dimension: [time, link] + # output ratio between heat and electricity and top_iso_fuel_line for extendable if not electric_ext.empty: - link_p_nom = get_var(n, "Link", "p_nom") - - # ratio of output heat to electricity set by p_nom_ratio - lhs = linexpr( - ( - n.links.loc[electric_ext, "efficiency"] - * n.links.loc[electric_ext, "p_nom_ratio"], - link_p_nom[electric_ext], - ), - (-n.links.loc[heat_ext, "efficiency"].values, link_p_nom[heat_ext].values), - ) + p_nom = n.model["Link-p_nom"] - define_constraints(n, lhs, "=", 0, "chplink", "fix_p_nom_ratio") - - # top_iso_fuel_line for extendable - lhs = linexpr( - (1, link_p[heat_ext]), - (1, link_p[electric_ext].values), - (-1, link_p_nom[electric_ext].values), + 
lhs = ( + p_nom.loc[electric_ext] + * (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values + - p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values ) + n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio") - define_constraints(n, lhs, "<=", 0, "chplink", "top_iso_fuel_line_ext") + rename = {"Link-ext": "Link"} + lhs = ( + p.loc[:, electric_ext] + + p.loc[:, heat_ext] + - p_nom.rename(rename).loc[electric_ext] + ) + n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext") + # top_iso_fuel_line for fixed if not electric_fix.empty: - # top_iso_fuel_line for fixed - lhs = linexpr((1, link_p[heat_fix]), (1, link_p[electric_fix].values)) - - rhs = n.links.loc[electric_fix, "p_nom"].values - - define_constraints(n, lhs, "<=", rhs, "chplink", "top_iso_fuel_line_fix") + lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix] + rhs = n.links.p_nom[electric_fix] + n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix") + # back-pressure if not electric.empty: - # backpressure - lhs = linexpr( - ( - n.links.loc[electric, "c_b"].values * n.links.loc[heat, "efficiency"], - link_p[heat], - ), - (-n.links.loc[electric, "efficiency"].values, link_p[electric].values), + lhs = ( + p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values) + - p.loc[:, electric] * n.links.efficiency[electric] ) - - define_constraints(n, lhs, "<=", 0, "chplink", "backpressure") + n.model.add_constraints(lhs <= rhs, name="chplink-backpressure") def add_co2_sequestration_limit(n, sns): co2_stores = n.stores.loc[n.stores.carrier == "co2 stored"].index - if co2_stores.empty or ("Store", "e") not in n.variables.index: + if co2_stores.empty: return - vars_final_co2_stored = get_var(n, "Store", "e").loc[sns[-1], co2_stores] + vars_final_co2_stored = n.model["Store-e"].loc[sns[-1], co2_stores] - lhs = linexpr((1, vars_final_co2_stored)).sum() + lhs = (1 * vars_final_co2_stored).sum() rhs = ( n.config["sector"].get("co2_sequestration_potential", 5) * 1e6 ) # TODO change 200 limit (Europe) name = "co2_sequestration_limit" - define_constraints( - n, lhs, "<=", rhs, "GlobalConstraint", "mu", axes=pd.Index([name]), spec=name - ) + + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") def set_h2_colors(n): - blue_h2 = get_var(n, "Link", "p")[ + blue_h2 = n.model["Link-p"].loc[ n.links.index[n.links.index.str.contains("blue H2")] ] - pink_h2 = get_var(n, "Link", "p")[ + pink_h2 = n.model["Link-p"].loc[ n.links.index[n.links.index.str.contains("pink H2")] ] @@ -810,16 +876,16 @@ def set_h2_colors(n): columns=pink_h2.columns, ) - total_blue = linexpr((weightings_blue, blue_h2)).sum().sum() + total_blue = (weightings_blue * blue_h2).sum().sum() - total_pink = linexpr((weightings_pink, pink_h2)).sum().sum() + total_pink = (weightings_pink * pink_h2).sum().sum() rhs_blue = load_h2 * snakemake.config["sector"]["hydrogen"]["blue_share"] rhs_pink = load_h2 * snakemake.config["sector"]["hydrogen"]["pink_share"] - define_constraints(n, total_blue, "=", rhs_blue, "blue_h2_share") + n.model.add_constraints(total_blue == rhs_blue, name="blue_h2_share") - define_constraints(n, total_pink, "=", rhs_pink, "pink_h2_share") + n.model.add_constraints(total_pink == rhs_pink, name="pink_h2_share") def add_existing(n): @@ -854,6 +920,59 @@ def add_existing(n): n.generators.loc[tech_index, tech] = existing_res +def add_lossy_bidirectional_link_constraints(n: pypsa.components.Network) -> None: + """ + Ensures that the two links simulating a bidirectional_link are extended the 
same amount. + """ + + if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns: + return + + # ensure that the 'reversed' column is boolean and identify all link carriers that have 'reversed' links + n.links["reversed"] = n.links.reversed.fillna(0).astype(bool) + carriers = n.links.loc[n.links.reversed, "carrier"].unique() # noqa: F841 + + # get the indices of all forward links (non-reversed), that have a reversed counterpart + forward_i = n.links.query( + "carrier in @carriers and ~reversed and p_nom_extendable" + ).index + + # function to get backward (reversed) indices corresponding to forward links + # this function is required to properly interact with the myopic naming scheme + def get_backward_i(forward_i): + return pd.Index( + [ + ( + re.sub(r"-(\d{4})$", r"-reversed-\1", s) + if re.search(r"-\d{4}$", s) + else s + "-reversed" + ) + for s in forward_i + ] + ) + + # get the indices of all backward links (reversed) + backward_i = get_backward_i(forward_i) + + # get the p_nom optimization variables for the links using the get_var function + links_p_nom = n.model["Link-p_nom"] + + # only consider forward and backward links that are present in the optimization variables + subset_forward = forward_i.intersection(links_p_nom.indexes["Link-ext"]) + subset_backward = backward_i.intersection(links_p_nom.indexes["Link-ext"]) + + # ensure we have a matching number of forward and backward links + if len(subset_forward) != len(subset_backward): + raise ValueError("Mismatch between forward and backward links.") + + # define the lefthand side of the constrain p_nom (forward) - p_nom (backward) = 0 + # this ensures that the forward links always have the same maximum nominal power as their backward counterpart + lhs = links_p_nom.loc[backward_i] - links_p_nom.loc[forward_i] + + # add the constraint to the PySPA model + n.model.add_constraints(lhs == 0, name="Link-bidirectional_sync") + + def extra_functionality(n, snapshots): """ Collects supplementary constraints which will be passed to @@ -876,11 +995,17 @@ def extra_functionality(n, snapshots): for o in opts: if "RES" in o: res_share = float(re.findall("[0-9]*\.?[0-9]+$", o)[0]) - add_RES_constraints(n, res_share) + add_RES_constraints(n, res_share, config) for o in opts: if "EQ" in o: add_EQ_constraints(n, o) + add_battery_constraints(n) + add_lossy_bidirectional_link_constraints(n) + + if snakemake.config["sector"]["chp"]: + logger.info("setting CHP constraints") + add_chp_constraints(n) if ( snakemake.config["policy_config"]["hydrogen"]["temporal_matching"] @@ -927,40 +1052,45 @@ def extra_functionality(n, snapshots): add_co2_sequestration_limit(n, snapshots) -def solve_network(n, config, solving={}, opts="", **kwargs): +def solve_network(n, config, solving, **kwargs): set_of_options = solving["solver"]["options"] cf_solving = solving["options"] - solver_options = solving["solver_options"][set_of_options] if set_of_options else {} - solver_name = solving["solver"]["name"] + kwargs["solver_options"] = ( + solving["solver_options"][set_of_options] if set_of_options else {} + ) + kwargs["solver_name"] = solving["solver"]["name"] + kwargs["extra_functionality"] = extra_functionality - track_iterations = cf_solving.get("track_iterations", False) - min_iterations = cf_solving.get("min_iterations", 4) - max_iterations = cf_solving.get("max_iterations", 6) + skip_iterations = cf_solving.get("skip_iterations", False) + if not n.lines.s_nom_extendable.any(): + skip_iterations = True + logger.info("No expandable lines found. 
Skipping iterative solving.") # add to network for extra_functionality n.config = config n.opts = opts - if cf_solving.get("skip_iterations", False): - network_lopf( - n, - solver_name=solver_name, - solver_options=solver_options, - extra_functionality=extra_functionality, - **kwargs, - ) + if skip_iterations: + status, condition = n.optimize(**kwargs) else: - ilopf( - n, - solver_name=solver_name, - solver_options=solver_options, - track_iterations=track_iterations, - min_iterations=min_iterations, - max_iterations=max_iterations, - extra_functionality=extra_functionality, - **kwargs, + kwargs["track_iterations"] = (cf_solving.get("track_iterations", False),) + kwargs["min_iterations"] = (cf_solving.get("min_iterations", 4),) + kwargs["max_iterations"] = (cf_solving.get("max_iterations", 6),) + status, condition = n.optimize.optimize_transmission_expansion_iteratively( + **kwargs ) + + if status != "ok": # and not rolling_horizon: + logger.warning( + f"Solving status '{status}' with termination condition '{condition}'" + ) + if "infeasible" in condition: + labels = n.model.compute_infeasibilities() + logger.info(f"Labels:\n{labels}") + n.model.print_infeasibilities() + raise RuntimeError("Solving status 'infeasible'") + return n @@ -969,28 +1099,28 @@ def solve_network(n, config, solving={}, opts="", **kwargs): from _helpers import mock_snakemake snakemake = mock_snakemake( - "solve_network", + "solve_sector_network", simpl="", clusters="4", ll="c1", opts="Co2L-4H", + planning_horizons="2030", + discountrate="0.071", + demand="AB", + sopts="144H", + h2export="120", + configfile="config.tutorial.yaml", ) configure_logging(snakemake) - tmpdir = snakemake.params.solving.get("tmpdir") - if tmpdir is not None: - Path(tmpdir).mkdir(parents=True, exist_ok=True) opts = snakemake.wildcards.opts.split("-") - solving = snakemake.params.solving + solve_opts = snakemake.config["solving"]["options"] is_sector_coupled = "sopts" in snakemake.wildcards.keys() - if is_sector_coupled: - overrides = override_component_attrs(snakemake.input.overrides) - n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) - else: - n = pypsa.Network(snakemake.input.network) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) if snakemake.params.augmented_line_connection.get("add_to_snakefile"): n.lines.loc[n.lines.index.str.contains("new"), "s_nom_min"] = ( @@ -1016,15 +1146,13 @@ def solve_network(n, config, solving={}, opts="", **kwargs): else: n_ref = None - n = prepare_network(n, solving["options"]) + n = prepare_network(n, solve_opts, config=solve_opts) n = solve_network( n, config=snakemake.config, - solving=solving, - opts=opts, - solver_dir=tmpdir, - solver_logfile=snakemake.log.solver, + solving=snakemake.params.solving, + log_fn=snakemake.log.solver, ) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/test/config.sector.yaml b/test/config.sector.yaml index 670344b0a..71daf0c62 100644 --- a/test/config.sector.yaml +++ b/test/config.sector.yaml @@ -20,8 +20,8 @@ countries: ["NG", "BJ"] electricity: extendable_carriers: - Store: [] - Link: [] + Store: [H2] + Link: [H2 pipeline] co2limit: 7.75e7 @@ -43,7 +43,6 @@ existing_capacities: sector: solid_biomass_potential: 10 # TWh/a, Potential of whole modelled area - gadm_level: 2 snapshots: # arguments to pd.date_range start: "2013-03-1" diff --git 
a/test/config.test_myopic.yaml b/test/config.test_myopic.yaml index ab306a63a..abe000aba 100644 --- a/test/config.test_myopic.yaml +++ b/test/config.test_myopic.yaml @@ -287,7 +287,6 @@ sector: co2_network: true co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2 - hydrogen_underground_storage: true shipping_hydrogen_liquefaction: false shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011 @@ -302,7 +301,6 @@ sector: NZ_2050: 0.36 DF_2050: 0.12 - gadm_level: 1 h2_cavern: true marginal_cost_storage: 0 methanation: true
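
Note (illustrative, not part of the patch above): the changes in scripts/solve_network.py replace the removed pypsa.linopf string helpers (linexpr, define_constraints, get_var) with linopy expressions attached to n.model. The sketch below mirrors the pattern used by add_BAU_constraints; the function name, the constraint name and the assumption that n.optimize.create_model() has already been called are mine, not from the patch.

import pandas as pd
import xarray as xr
import pypsa


def add_min_capacity_per_carrier(n: pypsa.Network, mincaps: dict) -> None:
    # Illustrative sketch of the linopy-based constraint pattern used throughout
    # the migrated solve_network.py; assumes n.optimize.create_model() was called.
    p_nom = n.model["Generator-p_nom"]  # capacity variables over dim "Generator-ext"
    ext_i = n.generators.query("p_nom_extendable")
    carrier = xr.DataArray(ext_i.carrier.rename_axis("Generator-ext"))
    lhs = p_nom.groupby(carrier).sum()  # one linear expression per carrier
    rhs = (
        pd.Series(mincaps)
        .reindex(lhs.indexes["carrier"])
        .fillna(0.0)
        .rename_axis("carrier")
    )
    n.model.add_constraints(lhs >= rhs, name="illustrative_min_cap_per_carrier")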
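
Note (illustrative, not from the patch): add_lossy_bidirectional_link_constraints pairs each forward link with its reversed counterpart purely by name, using the regex shown in the patch. The link names below are hypothetical and only demonstrate the naming convention the constraint assumes.

import re

import pandas as pd


def get_backward_i(forward_i: pd.Index) -> pd.Index:
    # Same substitution as in the patch: a myopic year suffix such as "-2030"
    # becomes "-reversed-2030", otherwise "-reversed" is simply appended.
    return pd.Index(
        [
            (
                re.sub(r"-(\d{4})$", r"-reversed-\1", s)
                if re.search(r"-\d{4}$", s)
                else s + "-reversed"
            )
            for s in forward_i
        ]
    )


# Hypothetical link names, for illustration only
forward = pd.Index(["H2 pipeline NG0-BJ0-2030", "H2 pipeline NG0-BJ0"])
print(get_backward_i(forward).tolist())
# ['H2 pipeline NG0-BJ0-reversed-2030', 'H2 pipeline NG0-BJ0-reversed']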
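
Note (illustrative, not from the patch): the rewritten add_SAFE_constraints only bounds the sum of extendable conventional capacity, and its right-hand side is plain arithmetic. The numbers below are made up purely to illustrate that arithmetic.

# Made-up numbers, only to illustrate the SAFE reserve-margin arithmetic
peakdemand = 100_000.0      # MW, max of the summed load time series
safe_reservemargin = 0.1    # config["electricity"]["SAFE_reservemargin"]
exist_conv_caps = 80_000.0  # MW, non-extendable conventional capacity already present

reserve_margin = peakdemand * (1.0 + safe_reservemargin)  # 110,000 MW of firm capacity required
rhs = reserve_margin - exist_conv_caps                    # remaining gap to be covered
# The added constraint then reads: sum of extendable conventional Generator-p_nom >= rhs
print(rhs)  # 30000.0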