diff --git a/.github/workflows/github_test_action.yml b/.github/workflows/github_test_action.yml
index a816bec26..16b888506 100644
--- a/.github/workflows/github_test_action.yml
+++ b/.github/workflows/github_test_action.yml
@@ -25,31 +25,31 @@ jobs:
steps:
- uses: actions/checkout@v4
#- uses: julia-actions/setup-julia@v1.5
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install pytest pytest-split
- if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- python -m pip install .["all"]
- if ${{ matrix.python-version == '3.9' }}; then python -m pip install pypower; fi
- if ${{ matrix.python-version != '3.9' }}; then python -m pip install numba; fi
- if ${{ matrix.python-version == '3.10' }}; then python -m pip install lightsim2grid; fi
+ uv sync --all-extras
+ if [ -f requirements.txt ]; then uv pip install -r requirements.txt; fi
+ uv pip install pytest-split
+ if ${{ matrix.python-version == '3.9' }}; then uv pip install pypower; fi
+ if ${{ matrix.python-version != '3.9' }}; then uv pip install numba; fi
+ if ${{ matrix.python-version == '3.10' }}; then uv pip install lightsim2grid; fi
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test with pytest
if: ${{ matrix.python-version != '3.9' }}
run: |
- python -m pytest --splits 2 --group ${{ matrix.group }}
+ uv run pytest --splits 2 --group ${{ matrix.group }}
- name: Test with pytest, Codecov and Coverage
if: ${{ matrix.python-version == '3.9' }}
run: |
- python -m pip install pytest-cov
- python -m pytest -n=auto --cov=./ --cov-report=xml --splits 2 --group ${{ matrix.group }}
+ uv pip install pytest-cov
+ uv run pytest -n=auto --cov=./ --cov-report=xml --splits 2 --group ${{ matrix.group }}
cp ./coverage.xml ./coverage-${{ matrix.group }}.xml
- name: Upload coverage as artifact
if: ${{ matrix.python-version == '3.9' }}
@@ -71,29 +71,27 @@ jobs:
steps:
- uses: actions/checkout@v4
#- uses: julia-actions/setup-julia@v1.5
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install pytest pytest-split
- if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- python -m pip install .["all"]
- python -m pip install pypower
+ uv sync --all-extras
+ uv pip install pypower pytest-split
+ if [ -f requirements.txt ]; then uv pip install -r requirements.txt; fi
- name: Install Julia
run: |
./.install_julia.sh 1.10.4
- python -m pip install julia
- python ./.install_pycall.py
+ uv pip install julia
+ uv run python ./.install_pycall.py
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test with pytest, Codecov and Coverage
run: |
- python -m pip install pytest-cov
- python -m pytest -n=auto --cov=./ --cov-report=xml --splits 2 --group ${{ matrix.group }}
+ uv pip install pytest-cov
+ uv run pytest -n=auto --cov=./ --cov-report=xml --splits 2 --group ${{ matrix.group }}
cp ./coverage.xml ./coverage-${{ matrix.group }}.xml
upload-coverage:
@@ -137,22 +135,21 @@ jobs:
group: [ 1, 2 ]
steps:
- uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install pytest pytest-split
- if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- python -m pip install .["all"]
+ uv sync --all-extras
+ uv pip install pytest-split
+ if [ -f requirements.txt ]; then uv pip install -r requirements.txt; fi
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test with pytest
run: |
- python -m pytest -W error --splits 2 --group ${{ matrix.group }}
+ uv run pytest -W error --splits 2 --group ${{ matrix.group }}
relying: # packages that rely on pandapower
runs-on: ubuntu-latest
@@ -161,31 +158,30 @@ jobs:
python-version: ['3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install pytest setuptools
- if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- python -m pip install .
- python -m pip install matplotlib
- if ${{ matrix.python-version != '3.9' }}; then python -m pip install numba; fi
+ uv sync --extra test
+ uv pip install setuptools
+ if [ -f requirements.txt ]; then uv pip install -r requirements.txt; fi
+ uv pip install matplotlib
+ if ${{ matrix.python-version != '3.9' }}; then uv pip install numba; fi
- name: Install pandapipes and simbench
run: |
- python -m pip install git+https://github.com/e2nIEE/pandapipes@develop#egg=pandapipes
- python -m pip install git+https://github.com/e2nIEE/simbench@develop#egg=simbench
+ uv pip install git+https://github.com/e2nIEE/pandapipes@develop#egg=pandapipes
+ uv pip install git+https://github.com/e2nIEE/simbench@develop#egg=simbench
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test pandapipes
run: |
- python -c 'from pandapipes import pp_dir; import pytest; import sys; ec = pytest.main([pp_dir]); sys.exit(ec)'
+ uv run python -c 'from pandapipes import pp_dir; import pytest; import sys; ec = pytest.main([pp_dir]); sys.exit(ec)'
- name: Test simbench
run: |
- python -c 'from simbench import sb_dir; import pytest; import sys; ec = pytest.main([sb_dir]); sys.exit(ec)'
+ uv run python -c 'from simbench import sb_dir; import pytest; import sys; ec = pytest.main([sb_dir]); sys.exit(ec)'
linting:
# run flake8 and check for errors
@@ -197,28 +193,26 @@ jobs:
python-version: ['3.10']
steps:
- uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install flake8
- python -m pip install .
- python -m pip install matplotlib
+ uv sync
+ uv pip install flake8 matplotlib
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
      - name: Lint with flake8 (syntax errors and undefined names)
continue-on-error: true
run: |
# stop the build if there are Python syntax errors or undefined names (omitted by exit-zero)
- flake8 . --count --exit-zero --select=E9,F63,F7,F82 --show-source --statistics
+ uv run flake8 . --exclude .venv --count --exit-zero --select=E9,F63,F7,F82 --show-source --statistics
- name: Lint with flake8 (all errors and warnings)
run: |
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+ uv run flake8 . --exclude .venv --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
postgresql:
# for the one test to cover postgresql
@@ -228,17 +222,16 @@ jobs:
python-version: ['3.12']
steps:
- uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install .[test,fileio]
+ uv sync --extra test --extra fileio
- name: List of installed packages
run: |
- python -m pip list
+ uv pip list
- name: Create PostgreSQL database
run: |
sudo systemctl start postgresql.service
@@ -249,73 +242,61 @@ jobs:
PGPASSWORD=secret psql --username=test_user --host=localhost --list sandbox
- name: Test pandapower File I/O
run: |
- python -c "import os; import json; from pandapower import pp_dir; conn_data={'host': 'localhost', 'user': 'test_user', 'database': 'sandbox', 'password': 'secret', 'schema': 'test_schema'}; fp = open(os.path.join(pp_dir, 'test', 'test_files', 'postgresql_connect_data.json'), 'w'); json.dump(conn_data, fp); fp.close()"
- python -c 'from pandapower import pp_dir; import pytest; import sys; import os; ec = pytest.main([os.path.join(pp_dir,"test","api","test_sql_io.py")]); sys.exit(ec)'
+ uv run python -c "import os; import json; from pandapower import pp_dir; conn_data={'host': 'localhost', 'user': 'test_user', 'database': 'sandbox', 'password': 'secret', 'schema': 'test_schema'}; fp = open(os.path.join(pp_dir, 'test', 'test_files', 'postgresql_connect_data.json'), 'w'); json.dump(conn_data, fp); fp.close()"
+ uv run python -c 'from pandapower import pp_dir; import pytest; import sys; import os; ec = pytest.main([os.path.join(pp_dir,"test","api","test_sql_io.py")]); sys.exit(ec)'
tutorial_tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- - name: Set up Python
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: '3.11'
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install pytest nbmake pytest-xdist igraph numba seaborn
+ uv sync --all-extras
./.install_julia.sh 1.10.4
- python -m pip install julia
- python ./.install_pycall.py
- python -m pip install jupyter
- python -m pip install .["all"]
+ uv pip install julia seaborn jupyter
+ uv run python ./.install_pycall.py
- name: List all installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test with pytest
# Careful when copying this command. The PYTHONPATH setup is Linux specific syntax.
run: |
- python -m pytest --nbmake -n=auto --nbmake-timeout=900 "./tutorials"
+ uv run pytest --nbmake -n=auto --nbmake-timeout=900 "./tutorials"
tutorial_warnings_tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- - name: Set up Python
- uses: actions/setup-python@v5
+ - name: Install uv
+ uses: astral-sh/setup-uv@38f3f104447c67c051c4a08e39b64a148898af3a #v4.2.0
with:
python-version: '3.11'
- name: Install dependencies
run: |
- python -m pip install --upgrade pip
- python -m pip install .[all]
- python -m pip install pytest nbmake pytest-xdist igraph numba seaborn
+ uv sync --all-extras
./.install_julia.sh 1.10.4
- python -m pip install julia
- python ./.install_pycall.py
+ uv pip install julia seaborn
+ uv run python ./.install_pycall.py
- name: List all installed packages
run: |
- python -m pip list
+ uv pip list
- name: Test with pytest
run: |
- python -m pytest -W error --nbmake -n=auto --nbmake-timeout=900 "./tutorials"
+ uv run pytest -W error --nbmake -n=auto --nbmake-timeout=900 "./tutorials"
docs_check:
+ name: Sphinx docs check
runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: [ '3.9' ]
steps:
- uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
- with:
- python-version: ${{ matrix.python-version }}
- - name: Check docs for Python ${{ matrix.python-version }}
- uses: e2nIEE/sphinx-action@master
+ - name: Check sphinx build
+ uses: ammaraskar/sphinx-action@7.4.7
with:
- pre-build-command: "apt update && apt upgrade -y && apt install -y build-essential gfortran cmake pkg-config libopenblas-dev;
- python -m pip install --upgrade pip;
- python -m pip install .[docs];"
+ pre-build-command: "python -m pip install uv && uv pip install .[docs] --system --link-mode=copy"
build-command: "sphinx-build -b html . _build -W"
docs-folder: "doc/"
diff --git a/.install_julia.sh b/.install_julia.sh
index a260cecfc..2f3b351fd 100755
--- a/.install_julia.sh
+++ b/.install_julia.sh
@@ -86,8 +86,7 @@ case $(uname) in
curl -L "$BASEURL/linux/$ARCH/$JULIANAME-$SUFFIX.tar.gz" | tar -xz
sudo ln -s $PWD/julia-*/bin/julia /usr/local/bin/julia
julia -e 'import Pkg; Pkg.add("PyCall");'
- julia -e 'import Pkg; Pkg.add("PowerModels"); Pkg.add("Ipopt");'
- julia -e 'import Pkg; Pkg.add("JSON"); Pkg.add("JuMP"); Pkg.add("Cbc"); Pkg.add("Juniper");'
+ julia -e 'import Pkg; Pkg.Registry.update(); Pkg.add("PandaModels"); Pkg.build(); Pkg.resolve();'
;;
Darwin)
if [ -e /usr/local/bin/julia ]; then
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 049f52637..cbf1d08a0 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -5,26 +5,15 @@ Change Log
-------------------------------
- [ADDED] pandas series accessor for geo column
- [FIXED] Increasing geojson precision as the default precision might cause problems with pandahub
+- [ADDED] converter for European EHV grid data from JAO, the "Single Allocation Platform (SAP) for all European Transmission System Operators (TSOs) that operate in accordance to EU legislation"
- [ADDED] Add GeographicalRegion and SubGeographicalRegion names and ids to bus df in cim converter
- [CHANGED] Capitalize first letter of columns busbar_id, busbar_name and substation_id in bus df for cim converter
- [FIXED] Do not modify pandas options when importing pandapower
+- [FIXED] copy-paste error in contingency results "max_limit_nminus1" and "min_limit_nminus1"
+- [ADDED] improved lightsim2grid documentation including compatibility issues
- [FIXED] cim2pp: set default xml encoding to None to avoid error after changing to lxml
-
-[2.14.11] - 2024-07-08
--------------------------------
-- [FIXED] Lightsim2grid version
-
-[2.14.10] - 2024-07-08
--------------------------------
-- [FIXED] geopandas version
-
-[2.14.9] - 2024-06-25
--------------------------------
-- [FIXED] scipy version
-
-[upcoming release] - 2024-..-..
--------------------------------
-
+- [FIXED] PandaModels OPF with 'bus_dc' key errors
+- [FIXED] julia tests
- [FIXED] copy array element to standard python scalar
- [FIXED] passing literal json to 'read_json' is deprecated
- [FIXED] replacing deprecated in1d with isin
@@ -96,8 +85,20 @@ Change Log
- [CHANGED] Trafo Controllers can now be added to elements that are out of service, changed self.nothing_to_do()
- [ADDED] Discrete shunt controller for local voltage regulation with shunt steps
- [ADDED] fix lengths mismatch of output if ignore_zero_length is False in plotting utility function coords_from_node_geodata() and rename ignore_zero_length to ignore_no_geo_diff
+- [ADDED] converter for European EHV grid data from JAO, the "Single Allocation Platform (SAP) for all European Transmission System Operators (TSOs) that operate in accordance to EU legislation"
- [ADDED] cim2pp converter: Using lxml to parse XML files (better performance)
-- [ADDED] possibility to load JSON files with unknown object models and just store the models as dictionaries in the network
+
+[2.14.11] - 2024-07-08
+-------------------------------
+- [FIXED] Lightsim2grid version
+
+[2.14.10] - 2024-07-08
+-------------------------------
+- [FIXED] geopandas version
+
+[2.14.9] - 2024-06-25
+-------------------------------
+- [FIXED] scipy version
+
[2.14.7] - 2024-06-14
-------------------------------
diff --git a/doc/converter.rst b/doc/converter.rst
index 7805ae123..633d7a32b 100644
--- a/doc/converter.rst
+++ b/doc/converter.rst
@@ -16,4 +16,5 @@ These tools are:
converter/matpower
converter/powerfactory
converter/cgmes
+ converter/jao
diff --git a/doc/converter/jao.rst b/doc/converter/jao.rst
new file mode 100644
index 000000000..e91995163
--- /dev/null
+++ b/doc/converter/jao.rst
@@ -0,0 +1,9 @@
+Documentation for the JAO Static Grid Model Converter Function
+==============================================================
+
+The ``from_jao`` function allows users to convert the Static Grid Model provided by JAO (Joint Allocation Office) into a pandapower network by reading and processing the provided Excel and HTML files.
+
+Function Overview
+-----------------
+
+.. autofunction:: pandapower.converter.from_jao
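+
+Example usage (a minimal sketch; the file paths below are placeholders for the downloaded
+JAO excel and html files)::
+
+    import pandapower.converter as pc
+
+    net = pc.from_jao(
+        "core_static_grid_model.xlsx",  # placeholder path to the excel file
+        "core_sgm_publication.html",    # placeholder path to the html file (or None)
+        extend_data_for_grid_group_connections=True,
+        drop_grid_groups_islands=True,
+    )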
diff --git a/doc/powerflow/ac.rst b/doc/powerflow/ac.rst
index bc6e46c55..331e4abc0 100644
--- a/doc/powerflow/ac.rst
+++ b/doc/powerflow/ac.rst
@@ -13,12 +13,69 @@ pandapower uses PYPOWER to solve the power flow problem:
If you are interested in the pypower casefile that pandapower is using for power flow, you can find it in net["_ppc"].
However all necessary information is written into the pandapower format net, so the pandapower user should not usually have to deal with pypower.
-If available, the librabry lightsim2grid is used as a backend for power flow simulation instead of the
-implementation in pandapower, leading to a boost in performance. The library lightsim2grid is implemented in C++ and
-can either be installed with pip install lightsim2grid, or built from source. More about the library and the
-installation guide can be found in the `documentation `_ or
+Accelerating Packages
+-------------------------
+
+Two external packages are available which can accelerate pandapower's power flow command :code:`runpp`:
+
+1. numba
+2. lightsim2grid
+
+If available, i.e. installed in the python environment, the code will check by default all
+prerequisites to use the external packages. numba is a python JIT compiler,
+cf. `numba <https://numba.pydata.org/>`_. In contrast, the library lightsim2grid
+is used as a backend for power flow simulation instead of the
+implementation in pandapower, leading to a boost in performance. The library lightsim2grid is
+implemented in C++ and can either be installed with pip install lightsim2grid, or built from source.
+More about the library and the installation guide can be found in the
+`documentation <https://lightsim2grid.readthedocs.io/>`_ or
its GitHub `repository `_.
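+
+For example, a minimal sketch (assuming lightsim2grid is installed and ``net`` is any
+pandapower net)::
+
+    import pandapower as pp
+    import pandapower.networks as nw
+
+    net = nw.case9()
+    pp.runpp(net)                       # default: numba if available, lightsim2grid="auto"
+    pp.runpp(net, lightsim2grid=True)   # force the lightsim2grid backend
+    pp.runpp(net, lightsim2grid=False)  # use the plain pandapower implementation
+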
+lightsim2grid Compatibility
+```````````````````````````````
+
+lightsim2grid is supported if all the following conditions are met:
+
+1. The lightsim2grid library is installed and available.
+2. The selected power flow algorithm is Newton-Raphson (algorithm='nr').
+3. Voltage-dependent loads are not enabled (voltage_depend_loads=False).
+4. Either:
+
+ * There is only one slack bus in the network, or
+ * Distributed slack is enabled (distributed_slack=True).
+
+5. None of the following elements are present in the grid model:
+
+ * Controllable shunts, including SVC, SSC, or VSC elements.
+ * Controllable impedances, such as TCSC elements.
+ * DC elements, including DC buses (bus_dc) or DC lines (line_dc).
+
+6. Temperature-Dependent Power Flow is not requested (tdpf=False).
+
+When lightsim2grid is Not Supported
+```````````````````````````````````````
+
+If any of the above conditions are not met, lightsim2grid cannot be used. In such cases:
+
+* If lightsim2grid='auto' (default), the fallback to the standard pandapower implementation occurs without a detailed message.
+* If lightsim2grid=True is explicitly set, an appropriate error or warning is raised or logged, depending on the condition.
+
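+A sketch of the two modes (assuming ``net`` violates one of the conditions, e.g. because
+voltage_depend_loads=True is set)::
+
+    pp.runpp(net, lightsim2grid="auto")  # silently falls back to the pandapower implementation
+    pp.runpp(net, lightsim2grid=True)    # raises an error or logs a warning instead
+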
+Common Limitations of lightsim2grid
+````````````````````````````````````````
+
+lightsim2grid does not currently support:
+
+* Algorithms other than Newton-Raphson
+* Voltage-dependent loads
+* Multiple slack buses without distributed slack
+* Grids containing any of the following advanced elements:
+
+ * Controllable shunts (SVC, SSC, VSC)
+ * Controllable impedances (TCSC)
+ * DC buses or DC lines
+
+* Temperature-Dependent Power Flow (tdpf=True)
+
Temperature-Dependent Power Flow (TDPF)
---------------------------------------
diff --git a/doc/requirements.txt b/doc/requirements.txt
deleted file mode 100644
index 2433ee980..000000000
--- a/doc/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-sphinx>=5.3.0
-sphinx_rtd_theme>=1.1.1
-numpydoc>=1.5.0
\ No newline at end of file
diff --git a/pandapower/auxiliary.py b/pandapower/auxiliary.py
index 1d9cb6f4d..507224db5 100644
--- a/pandapower/auxiliary.py
+++ b/pandapower/auxiliary.py
@@ -1273,9 +1273,11 @@ def _check_if_numba_is_installed(level="warning"):
def _check_lightsim2grid_compatibility(net, lightsim2grid, voltage_depend_loads, algorithm, distributed_slack, tdpf):
"""
- Implement some checks to decide whether the package lightsim2grid can be used. The package implements a backend for
- power flow calculation in C++ and provides a speed-up. If lightsim2grid is "auto" (default), we don't bombard the
- user with messages. Otherwise, if lightsim2grid is True bus cannot be used, we inform the user abot it.
+    Implement some checks to decide whether the package lightsim2grid can be used. These checks are
+    documented in :code:`doc/powerflow/ac.rst`. The package implements a backend for power flow
+    calculation in C++ and provides a speed-up. If lightsim2grid
+    is "auto" (default), we don't bombard the user with messages. Otherwise, if lightsim2grid is
+    True but cannot be used, we inform the user about it.
"""
if not lightsim2grid:
return False # early return :)
diff --git a/pandapower/build_branch.py b/pandapower/build_branch.py
index 8d5930bc9..45d455aa3 100644
--- a/pandapower/build_branch.py
+++ b/pandapower/build_branch.py
@@ -162,13 +162,15 @@ def _calc_trafo3w_parameter(net, ppc, update_vk_values: bool=True):
branch[f:t, SHIFT] = shift
branch[f:t, BR_STATUS] = in_service
# always set RATE_A for completeness
+    # RATE_A is considered by the (PowerModels) OPF. If zero -> unlimited
if "max_loading_percent" in trafo_df:
max_load = get_trafo_values(trafo_df, "max_loading_percent")
sn_mva = get_trafo_values(trafo_df, "sn_mva")
branch[f:t, RATE_A] = max_load / 100. * sn_mva
else:
- sn_mva = get_trafo_values(trafo_df, "sn_mva")
- branch[f:t, RATE_A] = sn_mva
+ # PowerModels considers "0" as "no limit"
+ # todo: inf and convert only when using PowerModels to 0., pypower opf converts the zero to inf
+ branch[f:t, RATE_A] = 0. if net["_options"]["mode"] == "opf" else 100.
def _calc_line_parameter(net, ppc, elm="line", ppc_elm="branch"):
@@ -244,13 +246,16 @@ def _calc_line_parameter(net, ppc, elm="line", ppc_elm="branch"):
branch[f:t, BR_STATUS] = line["in_service"].values
# always set RATE_A for completeness:
    # RATE_A is considered by the (PowerModels) OPF. If zero -> unlimited
- # TODO: check why OPF test fails if 100 instead of 0
- max_load = line.max_loading_percent.values if "max_loading_percent" in line else 0.
- vr = net.bus.loc[line["from_bus"].values, "vn_kv"].values * np.sqrt(3.)
- max_i_ka = line.max_i_ka.values
- df = line.df.values
- # This calculates the maximum apparent power at 1.0 p.u.
- branch[f:t, RATE_A] = max_load / 100. * max_i_ka * df * parallel * vr
+ if "max_loading_percent" in line:
+ max_load = line.max_loading_percent.values
+ vr = net.bus.loc[line["from_bus"].values, "vn_kv"].values * np.sqrt(3.)
+ max_i_ka = line.max_i_ka.values
+ df = line.df.values
+ branch[f:t, RATE_A] = max_load / 100. * max_i_ka * df * parallel * vr
+ else:
+ # PowerModels considers "0" as "no limit"
+ # todo: inf and convert only when using PowerModels to 0., pypower opf converts the zero to inf
+ branch[f:t, RATE_A] = 0. if mode == "opf" else 100.
def _calc_line_dc_parameter(net, ppc, elm="line_dc", ppc_elm="branch_dc"):
@@ -321,12 +326,17 @@ def _calc_line_dc_parameter(net, ppc, elm="line_dc", ppc_elm="branch_dc"):
branch_dc[f:t, DC_BR_STATUS] = line_dc["in_service"].values
# always set RATE_A for completeness:
    # RATE_A is considered by the (PowerModels) OPF. If zero -> unlimited
- max_load = line_dc.max_loading_percent.values if "max_loading_percent" in line_dc else 0.
- vr = net.bus_dc.loc[line_dc["from_bus_dc"].values, "vn_kv"].values * np.sqrt(3.)
- max_i_ka = line_dc.max_i_ka.values
- df = line_dc.df.values
- # This calculates the maximum apparent power at 1.0 p.u.
- branch_dc[f:t, DC_RATE_A] = max_load / 100. * max_i_ka * df * parallel * vr
+ if "max_loading_percent" in line_dc:
+ max_load = line_dc.max_loading_percent.values
+ vr = net.bus_dc.loc[line_dc["from_bus_dc"].values, "vn_kv"].values * np.sqrt(3.)
+ max_i_ka = line_dc.max_i_ka.values
+ df = line_dc.df.values
+ # This calculates the maximum apparent power at 1.0 p.u.
+ branch_dc[f:t, DC_RATE_A] = max_load / 100. * max_i_ka * df * parallel * vr
+ else:
+ # PowerModels considers "0" as "no limit"
+ # todo: inf and convert only when using PowerModels to 0., pypower opf converts the zero to inf
+ branch_dc[f:t, DC_RATE_A] = 0. if mode == "opf" else 100.
def _calc_trafo_parameter(net, ppc, update_vk_values: bool=True):
@@ -364,10 +374,16 @@ def _calc_trafo_parameter(net, ppc, update_vk_values: bool=True):
raise UserWarning("Rating factor df must be positive. Transformers with false "
"rating factors: %s" % trafo.query('df<=0').index.tolist())
# always set RATE_A for completeness
- max_load = trafo.max_loading_percent.values if "max_loading_percent" in trafo else 100
- sn_mva = trafo.sn_mva.values
- df = trafo.df.values
- branch[f:t, RATE_A] = max_load / 100. * sn_mva * df * parallel
+    # RATE_A is considered by the (PowerModels) OPF. If zero -> unlimited
+ if "max_loading_percent" in trafo:
+ max_load = trafo.max_loading_percent.values
+ sn_mva = trafo.sn_mva.values
+ df = trafo.df.values
+ branch[f:t, RATE_A] = max_load / 100. * sn_mva * df * parallel
+ else:
+ # PowerModels considers "0" as "no limit"
+ # todo: inf and convert only when using PowerModels to 0., pypower opf converts the zero to inf
+ branch[f:t, RATE_A] = 0. if net["_options"]["mode"] == "opf" else 100.
def get_trafo_values(trafo_df, par):
diff --git a/pandapower/contingency/contingency.py b/pandapower/contingency/contingency.py
index 6b6b51d7e..edcaf2398 100644
--- a/pandapower/contingency/contingency.py
+++ b/pandapower/contingency/contingency.py
@@ -362,11 +362,11 @@ def get_element_limits(net):
"max_limit": net.bus.loc[bus_index, "max_vm_pu"].values,
"min_limit": net.bus.loc[bus_index, "min_vm_pu"].values,
"max_limit_nminus1":
- net.line.loc[bus_index, "max_vm_nminus1_pu"].values
+ net.bus.loc[bus_index, "max_vm_nminus1_pu"].values
if "max_vm_nminus1_pu" in net.bus.columns
else net.bus.loc[bus_index, "max_vm_pu"].values,
"min_limit_nminus1":
- net.line.loc[bus_index, "min_vm_nminus1_pu"].values
+ net.bus.loc[bus_index, "min_vm_nminus1_pu"].values
if "min_vm_nminus1_pu" in net.bus.columns
else net.bus.loc[bus_index, "min_vm_pu"].values}})
diff --git a/pandapower/converter/__init__.py b/pandapower/converter/__init__.py
index acdf19579..5df505f2a 100644
--- a/pandapower/converter/__init__.py
+++ b/pandapower/converter/__init__.py
@@ -3,3 +3,4 @@
from pandapower.converter.pandamodels import *
from pandapower.converter.cim import *
from pandapower.converter.powerfactory import *
+from pandapower.converter.jao import *
diff --git a/pandapower/converter/cim/cim2pp/from_cim.py b/pandapower/converter/cim/cim2pp/from_cim.py
index 8b32f9414..d93dd8d21 100644
--- a/pandapower/converter/cim/cim2pp/from_cim.py
+++ b/pandapower/converter/cim/cim2pp/from_cim.py
@@ -123,7 +123,7 @@ def from_cim(file_list: List[str] = None, encoding: str = None, convert_line_to_
if there are errors in the conversion. Default: True.
:param file_list: The path to the CGMES files as a list.
- :param encoding: The encoding from the files. Optional, default: utf-8
+ :param encoding: The encoding from the files. Optional, default: None
:param convert_line_to_switch: Set this parameter to True to enable line -> switch conversion. All lines with a
resistance lower or equal than line_r_limit or a reactance lower or equal than line_x_limit will become a
switch. Optional, default: False
diff --git a/pandapower/converter/cim/cim_classes.py b/pandapower/converter/cim/cim_classes.py
index 80cd72eab..5c6c6ea92 100644
--- a/pandapower/converter/cim/cim_classes.py
+++ b/pandapower/converter/cim/cim_classes.py
@@ -34,14 +34,14 @@ def __init__(self, cim: Dict[str, Dict[str, pd.DataFrame]] = None, cgmes_version
self.file_names: Dict[str, str] = dict()
self.report_container = ReportContainer()
- def parse_files(self, file_list: List[str] or str = None, encoding: str = 'utf-8', prepare_cim_net: bool = False,
+ def parse_files(self, file_list: List[str] or str = None, encoding: str = None, prepare_cim_net: bool = False,
set_data_types: bool = False) -> CimParser:
"""
Parse CIM XML files from a storage.
:param file_list: The path to the CGMES files as a list. Note: The files need a FullModel to parse the
CGMES profile. Optional, default: None.
- :param encoding: The encoding from the files. Optional, default: utf-8
+ :param encoding: The encoding from the files. Optional, default: None
:param prepare_cim_net: Set this parameter to True to prepare the parsed cim data according to the
CimConverter. Optional, default: False
:param set_data_types: Set this parameter to True to set the cim data types at the parsed data. Optional,
diff --git a/pandapower/converter/jao/__init__.py b/pandapower/converter/jao/__init__.py
new file mode 100644
index 000000000..2aae42afa
--- /dev/null
+++ b/pandapower/converter/jao/__init__.py
@@ -0,0 +1 @@
+from .from_jao import from_jao
\ No newline at end of file
diff --git a/pandapower/converter/jao/from_jao.py b/pandapower/converter/jao/from_jao.py
new file mode 100644
index 000000000..e527eb745
--- /dev/null
+++ b/pandapower/converter/jao/from_jao.py
@@ -0,0 +1,1050 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2024 by University of Kassel and Fraunhofer Institute for Energy Economics
+# and Energy System Technology (IEE), Kassel. All rights reserved.
+
+from copy import deepcopy
+import os
+import json
+from functools import reduce
+from typing import Optional, Union
+import numpy as np
+import pandas as pd
+from pandas.api.types import is_integer_dtype, is_object_dtype
+from pandapower.io_utils import pandapowerNet
+from pandapower.create import create_empty_network, create_buses, create_lines_from_parameters, \
+ create_transformers_from_parameters
+from pandapower.topology import create_nxgraph, connected_components
+from pandapower.plotting import set_line_geodata_from_bus_geodata
+from pandapower.toolbox import drop_buses, fuse_buses
+
+try:
+ import pandaplan.core.pplog as logging
+except ImportError:
+ import logging
+
+logger = logging.getLogger(__name__)
+
+
+def from_jao(excel_file_path: str,
+ html_file_path: Optional[str],
+ extend_data_for_grid_group_connections: bool,
+ drop_grid_groups_islands: bool = False,
+ apply_data_correction: bool = True,
+ max_i_ka_fillna: Union[float, int] = 999,
+ **kwargs) -> pandapowerNet:
+ """Converts European (Core) EHV grid data provided by JAO (Joint Allocation Office), the
+ "Single Allocation Platform (SAP) for all European Transmission System Operators (TSOs) that
+ operate in accordance to EU legislation".
+
+ **Data Sources and Availability:**
+
+ The data are available at the website
+ `JAO Static Grid Model `_ (November 2024).
+    There, a map is provided to get a fine overview of the geographical extent and the scope of
+    the data. These include information about European (Core) lines, tielines, and transformers.
+
+ **Limitations:**
+
+ No information is available on load or generation.
+ The data quality with regard to the interconnection of the equipment, the information provided
+ and the (incomplete) geodata should be considered with caution.
+
+ **Features of the converter:**
+
+ - **Data Correction:** corrects known data inconsistencies, such as inconsistent spellings and missing necessary information.
+ - **Geographical Data Parsing:** Parses geographical data from the HTML file to add geolocation information to buses and lines.
+ - **Grid Group Connections:** Optionally extends the network by connecting islanded grid groups to avoid disconnected components.
+ - **Data Customization:** Allows for customization through additional parameters to control transformer creation, grid group dropping, and voltage level deviations.
+
+ Parameters
+ ----------
+ excel_file_path : str
+ input data including electrical parameters of grids' utilities, stored in multiple sheets
+ of an excel file
+    html_file_path : str or None
+        input data for geo information, provided by an html file. If the converter should be run
+        without geo information, None can be passed.
+ extend_data_for_grid_group_connections : bool
+        if True, connections (additional transformers and merging buses) are created to avoid
+        islanded grid groups
+ drop_grid_groups_islands : bool, optional
+ if True, islanded grid groups will be dropped if their number of buses is below
+ min_bus_number (default is 6), by default False
+ apply_data_correction : bool, optional
+        if True, known inconsistencies in the provided input data are corrected, by default True
+ max_i_ka_fillna : float | int, optional
+ value to fill missing values or data of false type in max_i_ka of lines and transformers.
+ If no value should be set, you can also pass np.nan. By default 999
+
+ Returns
+ -------
+ pandapowerNet
+ net created from the jao data
+
+ Additional Parameters
+ ---------------------
+ minimal_trafo_invention : bool, optional
+ applies if extend_data_for_grid_group_connections is True. Then, if minimal_trafo_invention
+        is True, adding transformers stops when no grid group is islanded anymore (does not apply
+        to release versions 5 and 6, i.e. there it does not matter what value is passed to
+        minimal_trafo_invention). If False, all equally named buses that have different voltage
+        levels and lie in different groups will be connected via additional transformers,
+ by default False
+ min_bus_number : Union[int,str], optional
+ Threshold value to decide which small grid groups should be dropped and which large grid
+        groups should be kept. If all islanded grid groups should be dropped except the largest
+        one, set "max". If all grid groups that do not contain a slack element should be
+ dropped, set "unsupplied". By default 6
+ rel_deviation_threshold_for_trafo_bus_creation : float, optional
+        If the voltage levels at the transformer locations deviate far from the transformer data,
+ additional buses are created. rel_deviation_threshold_for_trafo_bus_creation defines the
+ tolerance in which no additional buses are created. By default 0.2
+ log_rel_vn_deviation : float, optional
+ This parameter allows a range below rel_deviation_threshold_for_trafo_bus_creation in which
+        a warning is logged instead of creating additional buses. By default 0.12
+
+ Examples
+ --------
+ >>> from pathlib import Path
+ >>> import os
+ >>> import pandapower as pp
+ >>> home = str(Path.home())
+ >>> # assume that the files are located at your desktop:
+ >>> excel_file_path = os.path.join(home, "desktop", "202409_Core Static Grid Mode_6th release")
+ >>> html_file_path = os.path.join(home, "desktop", "2024-09-13_Core_SGM_publication.html")
+    >>> net = pp.converter.from_jao(excel_file_path, html_file_path, True, drop_grid_groups_islands=True)
+ """
+
+ # --- read data
+ data = pd.read_excel(excel_file_path, sheet_name=None, header=[0, 1])
+ if html_file_path is not None:
+ with open(html_file_path, mode='r', encoding=kwargs.get("encoding", "utf-8")) as f:
+ html_str = f.read()
+ else:
+ html_str = ""
+
+ # --- manipulate data / data corrections
+ if apply_data_correction:
+ html_str = _data_correction(data, html_str, max_i_ka_fillna)
+
+ # --- parse html_str to line_geo_data
+ line_geo_data = None
+ if html_str:
+ try:
+ line_geo_data = _parse_html_str(html_str)
+ except (json.JSONDecodeError, KeyError, AssertionError) as e:
+ logger.error(f"html data were ignored due to this error:\n{e}")
+
+ # --- create the pandapower net
+ net = create_empty_network(name=os.path.splitext(os.path.basename(excel_file_path))[0],
+ **{key: val for key, val in kwargs.items() if key == "sn_mva"})
+ _create_buses_from_line_data(net, data)
+ _create_lines(net, data, max_i_ka_fillna)
+ _create_transformers_and_buses(net, data, **kwargs)
+
+ # --- invent connections between grid groups
+ if extend_data_for_grid_group_connections:
+ _invent_connections_between_grid_groups(net, **kwargs)
+
+ # --- drop islanded grid groups
+ if drop_grid_groups_islands:
+ drop_islanded_grid_groups(net, kwargs.get("min_bus_number", 6))
+
+ # --- add geo data to buses and lines
+ if line_geo_data is not None:
+ _add_bus_geo(net, line_geo_data)
+ set_line_geodata_from_bus_geodata(net)
+
+ return net
+
+# --- secondary functions --------------------------------------------------------------------------
+
+
+def _data_correction(
+ data: dict[str, pd.DataFrame],
+ html_str: Optional[str],
+ max_i_ka_fillna: Union[float, int]) -> Optional[str]:
+ """Corrects input data in particular with regard to obvious weaknesses in the data provided,
+ such as inconsistent spellings and missing necessary information
+
+ Parameters
+ ----------
+ data : dict[str, pd.DataFrame]
+ data provided by the excel file which will be corrected
+ html_str : str | None
+ data provided by the html file which will be corrected
+ max_i_ka_fillna : float | int
+ value to fill missing values or data of false type in max_i_ka of lines and transformers.
+ If no value should be set, you can also pass np.nan.
+
+ Returns
+ -------
+ str
+ corrected html_str
+ """
+ # old name -> new name
+ rename_locnames = [("PSTMIKULOWA", "PST MIKULOWA"),
+ ("Chelm", "CHELM"),
+ ("OLSZTYN-MATK", "OLSZTYN-MATKI"),
+ ("STANISLAWOW", "Stanislawow"),
+ ("VIERRADEN", "Vierraden")]
+
+ # --- Line and Tieline data ---------------------------
+ for key in ["Lines", "Tielines"]:
+
+ # --- correct column names
+ cols = data[key].columns.to_frame().reset_index(drop=True)
+ cols.loc[cols[1] == "Voltage_level(kV)", 0] = None
+ cols.loc[cols[1] == "Comment", 0] = None
+ cols.loc[cols[0].str.startswith("Unnamed:").astype(bool), 0] = None
+ cols.loc[cols[1] == "Length_(km)", 0] = "Electrical Parameters" # might be wrong in
+ # Tielines otherwise
+ data[key].columns = pd.MultiIndex.from_arrays(cols.values.T)
+
+ # --- correct comma separation and cast to floats
+ data[key][("Maximum Current Imax (A)", "Fixed")] = \
+ data[key][("Maximum Current Imax (A)", "Fixed")].replace(
+ "\xa0", max_i_ka_fillna*1e3).replace(
+ "-", max_i_ka_fillna*1e3).replace(" ", max_i_ka_fillna*1e3)
+ col_names = [("Electrical Parameters", col_level1) for col_level1 in [
+ "Length_(km)", "Resistance_R(Ω)", "Reactance_X(Ω)", "Susceptance_B(μS)",
+ "Length_(km)"]] + [("Maximum Current Imax (A)", "Fixed")]
+ _float_col_comma_correction(data, key, col_names)
+
+ # --- consolidate to one way of name capitalization
+ for loc_name in [(None, "NE_name"), ("Substation_1", "Full_name"),
+ ("Substation_2", "Full_name")]:
+ data[key].loc[:, loc_name] = data[key].loc[:, loc_name].str.strip().apply(
+ _multi_str_repl, repl=rename_locnames)
+ html_str = _multi_str_repl(html_str, rename_locnames)
+
+ # --- Transformer data --------------------------------
+ key = "Transformers"
+
+ # --- fix Locations
+ loc_name = ("Location", "Full Name")
+ data[key].loc[:, loc_name] = data[key].loc[:, loc_name].str.strip().apply(
+ _multi_str_repl, repl=rename_locnames)
+
+ # --- fix data in nonnull_taps
+ taps = data[key].loc[:, ("Phase Shifting Properties", "Taps used for RAO")].fillna("").astype(
+ str).str.replace(" ", "")
+ nonnull = taps.apply(len).astype(bool)
+ nonnull_taps = taps.loc[nonnull]
+ surrounded = nonnull_taps.str.startswith("<") & nonnull_taps.str.endswith(">")
+ nonnull_taps.loc[surrounded] = nonnull_taps.loc[surrounded].str[1:-1]
+ slash_sep = (~nonnull_taps.str.contains(";")) & nonnull_taps.str.contains("/")
+ nonnull_taps.loc[slash_sep] = nonnull_taps.loc[slash_sep].str.replace("/", ";")
+ nonnull_taps.loc[nonnull_taps == "0"] = "0;0"
+ data[key].loc[nonnull, ("Phase Shifting Properties", "Taps used for RAO")] = nonnull_taps
+ data[key].loc[~nonnull, ("Phase Shifting Properties", "Taps used for RAO")] = "0;0"
+
+ # --- phase shifter with double info
+ cols = ["Phase Regulation δu (%)", "Angle Regulation δu (%)"]
+ for col in cols:
+ if is_object_dtype(data[key].loc[:, ("Phase Shifting Properties", col)]):
+ tr_double = data[key].index[data[key].loc[:, (
+ "Phase Shifting Properties", col)].str.contains("/").fillna(0).astype(bool)]
+ data[key].loc[tr_double, ("Phase Shifting Properties", col)] = data[key].loc[
+ tr_double, ("Phase Shifting Properties", col)].str.split("/", expand=True)[
+ 1].str.replace(",", ".").astype(float).values # take second info and correct
+ # separation: , -> .
+
+ return html_str
+
+
+def _parse_html_str(html_str: str) -> pd.DataFrame:
+ """Converts ths geodata from the html file (information hidden in the string), from Lines in
+ particular, to a DataFrame that can be used later in _add_bus_geo()
+
+ Parameters
+ ----------
+ html_str : str
+ html file that includes geodata information
+
+ Returns
+ -------
+ pd.DataFrame
+ extracted geodata for a later and easy use
+ """
+ def _filter_name(st: str) -> str:
+ name_start = "NE name: "
+ name_end = ""
+ pos0 = st.find(name_start) + len(name_start)
+ pos1 = st.find(name_end, pos0)
+ assert pos0 >= 0
+ assert pos1 >= len(name_start)
+ return st[pos0:pos1]
+
+    # the marker strings below are a reconstruction; the original literals (containing the
+    # HTML script tags that wrap the embedded JSON payload) were lost in extraction
+    json_start_str = '<script type="application/json"'
+    json_start_pos = html_str.find(json_start_str)
+    json_start_pos = html_str.find('>', json_start_pos) + 1
+    json_end_pos = html_str[json_start_pos:].find('</script>')
+ json_str = html_str[json_start_pos:(json_start_pos+json_end_pos)]
+ geo_data = json.loads(json_str)
+ geo_data = geo_data["x"]["calls"]
+ methods_pos = pd.Series({item["method"]: i for i, item in enumerate(geo_data)})
+ polylines = geo_data[methods_pos.at["addPolylines"]]["args"]
+ EIC_start = "EIC Code: "
+ if len(polylines[6]) != len(polylines[0]):
+ raise AssertionError("The lists of EIC Code data and geo data are not of the same length.")
+ line_EIC = [polylines[6][i][polylines[6][i].find(EIC_start)+len(EIC_start):] for i in range(
+ len(polylines[6]))]
+ line_name = [_filter_name(polylines[6][i]) for i in range(len(polylines[6]))]
+ line_geo_data = pd.concat([_lng_lat_to_df(polylines[0][i][0][0], line_EIC[i], line_name[i]) for
+ i in range(len(polylines[0]))], ignore_index=True)
+
+ # remove trailing whitespaces
+ for col in ["EIC_Code", "name"]:
+ line_geo_data[col] = line_geo_data[col].str.strip()
+
+ return line_geo_data
+
+
+def _create_buses_from_line_data(net: pandapowerNet, data: dict[str, pd.DataFrame]) -> None:
+ """Creates buses to the pandapower net using information from the lines and tielines sheets
+ (excel file).
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net to be filled by buses
+ data : dict[str, pd.DataFrame]
+ data provided by the excel file which will be corrected
+ """
+ bus_df_empty = pd.DataFrame({"name": str(), "vn_kv": float(), "TSO": str()}, index=[])
+ bus_df = deepcopy(bus_df_empty)
+ for key in ["Lines", "Tielines"]:
+ for subst in ['Substation_1', 'Substation_2']:
+ data_col_tuples = [(subst, "Full_name"), (None, "Voltage_level(kV)"), (None, "TSO")]
+ to_add = data[key].loc[:, data_col_tuples].set_axis(bus_df.columns, axis="columns")
+ if len(bus_df):
+ bus_df = pd.concat([bus_df, to_add])
+ else:
+ bus_df = to_add
+ bus_df = _drop_duplicates_and_join_TSO(bus_df)
+ new_bus_idx = create_buses(
+ net, len(bus_df), vn_kv=bus_df.vn_kv, name=bus_df.name, zone=bus_df.TSO)
+ assert np.allclose(new_bus_idx, bus_df.index)
+
+
+def _create_lines(
+ net: pandapowerNet,
+ data: dict[str, pd.DataFrame],
+ max_i_ka_fillna: Union[float, int]) -> None:
+ """Creates lines to the pandapower net using information from the lines and tielines sheets
+ (excel file).
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net to be filled by buses
+ data : dict[str, pd.DataFrame]
+ data provided by the excel file which will be corrected
+ max_i_ka_fillna : float | int
+ value to fill missing values or data of false type in max_i_ka of lines and transformers.
+ If no value should be set, you can also pass np.nan.
+ """
+
+ bus_idx = _get_bus_idx(net)
+
+ for key in ["Lines", "Tielines"]:
+ length_km = data[key][("Electrical Parameters", "Length_(km)")].values
+ zero_length = np.isclose(length_km, 0)
+ no_length = np.isnan(length_km)
+ if sum(zero_length) or sum(no_length):
+ logger.warning(f"According to given data, {sum(zero_length)} {key.lower()} have zero "
+ f"length and {sum(zero_length)} {key.lower()} have no length data. "
+ "Both types of wrong data are replaced by 1 km.")
+ length_km[zero_length | no_length] = 1
+ vn_kvs = data[key].loc[:, (None, "Voltage_level(kV)")].values
+
+ _ = create_lines_from_parameters(
+ net,
+ bus_idx.loc[list(tuple(zip(data[key].loc[:, ("Substation_1", "Full_name")].values,
+ vn_kvs)))].values,
+ bus_idx.loc[list(tuple(zip(data[key].loc[:, ("Substation_2", "Full_name")].values,
+ vn_kvs)))].values,
+ length_km,
+ data[key][("Electrical Parameters", "Resistance_R(Ω)")].values / length_km,
+ data[key][("Electrical Parameters", "Reactance_X(Ω)")].values / length_km,
+ data[key][("Electrical Parameters", "Susceptance_B(μS)")].values / length_km,
+ data[key][("Maximum Current Imax (A)", "Fixed")].fillna(
+ max_i_ka_fillna*1e3).values / 1e3,
+ name=data[key].xs("NE_name", level=1, axis=1).values[:, 0],
+ EIC_Code=data[key].xs("EIC_Code", level=1, axis=1).values[:, 0],
+ TSO=data[key].xs("TSO", level=1, axis=1).values[:, 0],
+ Comment=data[key].xs("Comment", level=1, axis=1).values[:, 0],
+ Tieline=key == "Tielines",
+ )
+
+
+def _create_transformers_and_buses(
+ net: pandapowerNet, data: dict[str, pd.DataFrame], **kwargs) -> None:
+ """Creates transformers to the pandapower net using information from the transformers sheet
+ (excel file).
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net to be filled by buses
+ data : dict[str, pd.DataFrame]
+ data provided by the excel file which will be corrected
+ """
+
+ # --- data preparations
+ key = "Transformers"
+ bus_idx = _get_bus_idx(net)
+ vn_hv_kv, vn_lv_kv = _get_transformer_voltages(data, bus_idx)
+ trafo_connections = _allocate_trafos_to_buses_and_create_buses(
+ net, data, bus_idx, vn_hv_kv, vn_lv_kv, **kwargs)
+ max_i_a = data[key].loc[:, ("Maximum Current Imax (A) primary", "Fixed")]
+ empty_i_idx = max_i_a.index[max_i_a.isnull()]
+ max_i_a.loc[empty_i_idx] = data[key].loc[empty_i_idx, (
+ "Maximum Current Imax (A) primary", "Max")].values
+ sn_mva = np.sqrt(3) * max_i_a * vn_hv_kv / 1e3
+ z_pu = vn_lv_kv**2 / sn_mva
+ rk = data[key].xs("Resistance_R(Ω)", level=1, axis=1).values[:, 0] / z_pu
+ xk = data[key].xs("Reactance_X(Ω)", level=1, axis=1).values[:, 0] / z_pu
+ b0 = data[key].xs("Susceptance_B (µS)", level=1, axis=1).values[:, 0] * 1e-6 * z_pu
+ g0 = data[key].xs("Conductance_G (µS)", level=1, axis=1).values[:, 0] * 1e-6 * z_pu
+ zk = np.sqrt(rk**2 + xk**2)
+ vk_percent = np.sign(xk) * zk * 100
+ vkr_percent = rk * 100
+ pfe_kw = g0 * sn_mva * 1e3
+ i0_percent = 100 * np.sqrt(b0**2 + g0**2) * net.sn_mva / sn_mva
+ taps = data[key].loc[:, ("Phase Shifting Properties", "Taps used for RAO")].str.split(
+ ";", expand=True).astype(int).set_axis(["tap_min", "tap_max"], axis=1)
+
+ du = _get_float_column(data[key], ("Phase Shifting Properties", "Phase Regulation δu (%)"))
+ dphi = _get_float_column(data[key], ("Phase Shifting Properties", "Angle Regulation δu (%)"))
+ phase_shifter = np.isclose(du, 0) & (~np.isclose(dphi, 0)) # Symmetrical/Asymmetrical not
+ # considered
+
+ _ = create_transformers_from_parameters(
+ net,
+ trafo_connections.hv_bus.values,
+ trafo_connections.lv_bus.values,
+ sn_mva,
+ vn_hv_kv,
+ vn_lv_kv,
+ vkr_percent,
+ vk_percent,
+ pfe_kw,
+ i0_percent,
+ shift_degree=data[key].xs("Theta θ (°)", level=1, axis=1).values[:, 0],
+ tap_pos=0,
+ tap_neutral=0,
+ tap_side="lv",
+ tap_min=taps["tap_min"].values,
+ tap_max=taps["tap_max"].values,
+ tap_phase_shifter=phase_shifter,
+ tap_step_percent=du,
+ tap_step_degree=dphi,
+ name=data[key].loc[:, ("Location", "Full Name")].str.strip().values,
+ EIC_Code=data[key].xs("EIC_Code", level=1, axis=1).values[:, 0],
+ TSO=data[key].xs("TSO", level=1, axis=1).values[:, 0],
+ Comment=data[key].xs("Comment", level=1, axis=1).replace("\xa0", "").values[:, 0],
+ )
+
+
+def _invent_connections_between_grid_groups(
+ net: pandapowerNet, minimal_trafo_invention: bool = False, **kwargs) -> None:
+ """Adds connections between islanded grid groups via:
+
+    - adding transformers between equally named buses that have different voltage levels and lie in different groups
+ - merge buses of same voltage level, different grid groups and equal name base
+ - fuse buses that are close to each other
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net to be manipulated
+ minimal_trafo_invention : bool, optional
+        if True, adding transformers stops when no grid group is islanded anymore (does not apply
+        to release versions 5 and 6, i.e. there it does not matter what value is passed to
+        minimal_trafo_invention). If False, all equally named buses that have different voltage
+        levels and lie in different groups will be connected via additional transformers,
+ by default False
+ """
+ grid_groups = get_grid_groups(net)
+ bus_idx = _get_bus_idx(net)
+ bus_grid_groups = pd.concat([pd.Series(group, index=buses) for group, buses in zip(
+ grid_groups.index, grid_groups.buses)]).sort_index()
+
+ # treat for example "Wuergau" equally as "Wuergau (2)":
+ location_names = pd.Series(bus_idx.index.get_level_values(0))
+ location_names = location_names.str.replace(r"(.) \([0-9]+\)", r"\1", regex=True)
+ bus_idx.index = pd.MultiIndex.from_arrays(
+ [location_names.values, bus_idx.index.get_level_values(1).to_numpy()],
+ names=bus_idx.index.names)
+
+ # --- add Transformers between equally named buses that have different voltage level and lay in
+ # --- different groups
+ connected_vn_kvs_by_trafos = pd.DataFrame({
+ "hv": net.bus.vn_kv.loc[net.trafo.hv_bus.values].values,
+ "lv": net.bus.vn_kv.loc[net.trafo.lv_bus.values].values,
+ "index": net.trafo.index}).set_index(["hv", "lv"]).sort_index()
+ dupl_location_names = location_names[location_names.duplicated()]
+
+ for location_name in dupl_location_names:
+ if minimal_trafo_invention and len(bus_grid_groups.unique()) <= 1:
+ break # break with regard to minimal_trafo_invention
+ grid_groups_at_location = bus_grid_groups.loc[bus_idx.loc[location_name].values]
+ grid_groups_at_location = grid_groups_at_location.drop_duplicates()
+ if len(grid_groups_at_location) < 2:
+ continue
+ elif len(grid_groups_at_location) > 2:
+ raise NotImplementedError("Code is not provided to invent Transformer connections "
+ "between locations with more than two grid groups, i.e. "
+ "voltage levels.")
+ TSO = net.bus.zone.at[grid_groups_at_location.index[0]]
+ vn_kvs = net.bus.vn_kv.loc[grid_groups_at_location.index].sort_values(ascending=False)
+ try:
+ trafos_connecting_same_voltage_levels = \
+ connected_vn_kvs_by_trafos.loc[tuple(vn_kvs)]
+ except KeyError:
+ logger.info(f"For location {location_name}, no transformer data can be reused since "
+ f"no transformer connects {vn_kvs.sort_values(ascending=False).iat[0]} kV "
+ f"and {vn_kvs.sort_values(ascending=False).iat[1]} kV.")
+ continue
+ trafos_of_same_TSO = trafos_connecting_same_voltage_levels.loc[(net.bus.zone.loc[
+ net.trafo.hv_bus.loc[trafos_connecting_same_voltage_levels.values.flatten(
+ )].values] == TSO).values].values.flatten()
+
+ # from which trafo parameters are copied:
+ tr_to_be_copied = trafos_of_same_TSO[0] if len(trafos_of_same_TSO) else \
+ trafos_connecting_same_voltage_levels.values.flatten()[0]
+
+ # copy transformer data
+ duplicated_row = net.trafo.loc[[tr_to_be_copied]].copy()
+ duplicated_row.index = [net.trafo.index.max() + 1] # adjust index
+ duplicated_row.hv_bus = vn_kvs.index[0] # adjust hv_bus, lv_bus
+ duplicated_row.lv_bus = vn_kvs.index[1] # adjust hv_bus, lv_bus
+ duplicated_row.name = "additional transformer to connect the grid"
+ net.trafo = pd.concat([net.trafo, duplicated_row])
+
+ bus_grid_groups.loc[bus_grid_groups == grid_groups_at_location.iat[1]] = \
+ grid_groups_at_location.iat[0]
+
+ # --- merge buses of same voltage level, different grid groups and equal name base
+ bus_name_splits = net.bus.name.str.split(r"[ -/]+", expand=True)
+ buses_with_single_base = net.bus.name.loc[(~bus_name_splits.isnull()).sum(axis=1) == 1]
+ for idx, name_base in buses_with_single_base.items():
+ same_name_base = net.bus.drop(idx).name.str.contains(name_base)
+ if not any(same_name_base):
+ continue
+ other_group = bus_grid_groups.drop(idx) != bus_grid_groups.at[idx]
+ same_vn = net.bus.drop(idx).vn_kv == net.bus.vn_kv.at[idx]
+ is_fuse_candidate = same_name_base & other_group & same_vn
+ if not any(is_fuse_candidate):
+ continue
+ to_fuse = bus_grid_groups.drop(idx).loc[is_fuse_candidate].drop_duplicates()
+ fuse_buses(net, idx, set(to_fuse.index))
+
+        bus_grid_groups.loc[bus_grid_groups.isin(bus_grid_groups.drop(idx).loc[
+            is_fuse_candidate].unique())] = bus_grid_groups.at[idx]
+ bus_grid_groups = bus_grid_groups.drop(to_fuse.index)
+
+ # --- fuse buses that are close to each other
+ for name1, name2 in [("CROISIERE", "BOLLENE (POSTE RESEAU)"),
+ ("CAEN", "DRONNIERE (LA)"),
+ ("TRINITE-VICTOR", "MENTON/TRINITE VICTOR")]:
+ b1 = net.bus.index[net.bus.name == name1]
+ b2 = net.bus.index[net.bus.name == name2]
+ if len(b1) == 1 and len(b2) >= 1:
+ fuse_buses(net, b1[0], set(b2))
+ bus_grid_groups = bus_grid_groups.drop(b2)
+ else:
+ logger.info("Buses of the following names were intended to be fused but were not found."
+ f"\n'{name1}' and '{name2}'")
+
+
+def drop_islanded_grid_groups(
+ net: pandapowerNet,
+ min_bus_number: Union[int, str],
+ **kwargs) -> None:
+ """Drops grid groups that are islanded and include a number of buses below min_bus_number.
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net in which islanded grid groups will be dropped
+ min_bus_number : int | str, optional
+ Threshold value to decide which small grid groups should be dropped and which large grid
+ groups should be kept. If all islanded grid groups should be dropped except of the one
+ largest, set "max". If all grid groups that do not contain a slack element should be
+ dropped, set "unsupplied".
+ """
+ def _grid_groups_to_drop_by_min_bus_number():
+ return grid_groups.loc[grid_groups["n_buses"] < min_bus_number]
+
+ grid_groups = get_grid_groups(net, **kwargs)
+
+ if min_bus_number == "unsupplied":
+ slack_buses = set(net.ext_grid.loc[net.ext_grid.in_service, "bus"]) | \
+ set(net.gen.loc[net.gen.in_service & net.gen.slack, "bus"])
+        grid_groups_to_drop = grid_groups.loc[grid_groups.buses.apply(
+            lambda x: x.isdisjoint(slack_buses))]
+
+ elif min_bus_number == "max":
+ min_bus_number = grid_groups["n_buses"].max()
+ grid_groups_to_drop = _grid_groups_to_drop_by_min_bus_number()
+
+ elif isinstance(min_bus_number, int):
+ grid_groups_to_drop = _grid_groups_to_drop_by_min_bus_number()
+
+ else:
+ raise NotImplementedError(
+ f"{min_bus_number=} is not implemented. Use an int, 'max', or 'unsupplied' instead.")
+
+ buses_to_drop = reduce(set.union, grid_groups_to_drop.buses)
+ drop_buses(net, buses_to_drop)
+ logger.info(f"drop_islanded_grid_groups() drops {len(grid_groups_to_drop)} grid groups with a "
+ f"total of {grid_groups_to_drop.n_buses.sum()} buses.")
+
+
+def _add_bus_geo(net: pandapowerNet, line_geo_data: pd.DataFrame) -> None:
+ """Adds geodata to the buses. The function needs to handle cases where line_geo_data does not
+ include no or multiple geodata per bus. Primarly, the geodata are allocate via EIC Code names,
+ if ambigous, names are considered.
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ net in which geodata are added to the buses
+ line_geo_data : pd.DataFrame
+ Converted geodata from the html file
+ """
+ iSl = pd.IndexSlice
+ lgd_EIC_bus = line_geo_data.pivot_table(values="value", index=["EIC_Code", "bus"],
+ columns="geo_dim")
+ lgd_name_bus = line_geo_data.pivot_table(values="value", index=["name", "bus"],
+ columns="geo_dim")
+ lgd_EIC_bus_idx_extended = pd.MultiIndex.from_frame(lgd_EIC_bus.index.to_frame().assign(
+ **dict(col_name="EIC_Code")).rename(columns=dict(EIC_Code="identifier")).loc[
+ :, ["col_name", "identifier", "bus"]])
+ lgd_name_bus_idx_extended = pd.MultiIndex.from_frame(lgd_name_bus.index.to_frame().assign(
+ **dict(col_name="name")).rename(columns=dict(name="identifier")).loc[
+ :, ["col_name", "identifier", "bus"]])
+ lgd_bus = pd.concat([lgd_EIC_bus.set_axis(lgd_EIC_bus_idx_extended),
+ lgd_name_bus.set_axis(lgd_name_bus_idx_extended)])
+ dupl_EICs = net.line.EIC_Code.loc[net.line.EIC_Code.duplicated()]
+ dupl_names = net.line.name.loc[net.line.name.duplicated()]
+
+ def _geo_json_str(this_bus_geo: pd.Series) -> str:
+ return f'{{"coordinates": [{this_bus_geo.at["lng"]}, {this_bus_geo.at["lat"]}], "type": "Point"}}'
+
+ def _add_bus_geo_inner(bus: int) -> Optional[str]:
+ from_bus_line_excerpt = net.line.loc[net.line.from_bus ==
+ bus, ["EIC_Code", "name", "Tieline"]]
+ to_bus_line_excerpt = net.line.loc[net.line.to_bus == bus, ["EIC_Code", "name", "Tieline"]]
+ line_excerpt = pd.concat([from_bus_line_excerpt, to_bus_line_excerpt])
+ n_connected_line_ends = len(line_excerpt)
+ if n_connected_line_ends == 0:
+ logger.error(
+ f"No line of the net is connected to bus {bus} (name {net.bus.at[bus, 'name']}), "
+ "so no geodata can be allocated to it from line_geo_data.")
+ return None
+ is_dupl = pd.concat([
+ pd.DataFrame({"EIC": from_bus_line_excerpt.EIC_Code.isin(dupl_EICs).values,
+ "name": from_bus_line_excerpt.name.isin(dupl_names).values},
+ index=pd.MultiIndex.from_product([["from"], from_bus_line_excerpt.index],
+ names=["bus", "line_index"])),
+ pd.DataFrame({"EIC": to_bus_line_excerpt.EIC_Code.isin(dupl_EICs).values,
+ "name": to_bus_line_excerpt.name.isin(dupl_names).values},
+ index=pd.MultiIndex.from_product([["to"], to_bus_line_excerpt.index],
+ names=["bus", "line_index"]))
+ ])
+ is_missing = pd.DataFrame({
+ "EIC": ~line_excerpt.EIC_Code.isin(
+ lgd_bus.loc["EIC_Code"].index.get_level_values("identifier")),
+ "name": ~line_excerpt.name.isin(
+ lgd_bus.loc["name"].index.get_level_values("identifier"))
+ }).set_axis(is_dupl.index)
+ is_tieline = pd.Series(net.line.loc[is_dupl.index.get_level_values("line_index"),
+ "Tieline"].values, index=is_dupl.index)
+
+ # --- construct access_vals, i.e. values to take the line geo data from lgd_bus:
+ # --- take "EIC_Code" if it is unique and present; otherwise take "name" if that is
+ # --- unique and present; otherwise ignore the line end. Do this for from and to bus.
+ access_vals = pd.DataFrame({
+ "col_name": "EIC_Code",
+ "identifier": line_excerpt.EIC_Code.values,
+ "bus": is_dupl.index.get_level_values("bus").values
+ }) # default is EIC_Code
+ take_from_name = ((is_dupl.EIC | is_missing.EIC) & (
+ ~is_dupl.name & ~is_missing.name)).values
+ access_vals.loc[take_from_name, "col_name"] = "name"
+ access_vals.loc[take_from_name, "identifier"] = line_excerpt.name.loc[take_from_name].values
+ keep = (~(is_dupl | is_missing)).any(axis=1).values
+ if np.all(is_missing):
+ log_msg = (f"For bus {bus} (name {net.bus.at[bus, 'name']}), {n_connected_line_ends} "
+ "were found but no EIC_Codes or names of corresponding lines were found ."
+ "in the geo data from the html file.")
+ if is_tieline.all():
+ logger.debug(log_msg)
+ else:
+ logger.warning(log_msg)
+ return None
+ elif sum(keep) == 0:
+ logger.info(f"For {bus=}, all EIC_Codes and names of connected lines are ambiguous. "
+ "No geo data is dropped at this point.")
+ keep[(~is_missing).any(axis=1)] = True
+ access_vals = access_vals.loc[keep]
+
+ # --- get this_bus_geo from EIC_Code or name with regard to access_vals
+ this_bus_geo = lgd_bus.loc[iSl[
+ access_vals.col_name, access_vals.identifier, access_vals.bus], :]
+
+ if len(this_bus_geo) > 1:
+ # reduce similar/equal lines
+ this_bus_geo = this_bus_geo.loc[this_bus_geo.round(2).drop_duplicates().index]
+
+ # --- return geo_json_str
+ len_this_bus_geo = len(this_bus_geo)
+ if len_this_bus_geo == 1:
+ return _geo_json_str(this_bus_geo.iloc[0])
+ elif len_this_bus_geo == 2:
+ how_often = pd.Series(
+ [sum(np.isclose(lgd_EIC_bus["lat"], this_bus_geo["lat"].iat[i]) &
+ np.isclose(lgd_EIC_bus["lng"], this_bus_geo["lng"].iat[i])) for i in
+ range(len_this_bus_geo)], index=this_bus_geo.index)
+ if how_often.at[how_often.idxmax()] >= 1:
+ logger.warning(f"Bus {bus} (name {net.bus.at[bus, 'name']}) was found multiple times"
+ " in line_geo_data. No value exists more often than others. "
+ "The first of most used geo positions is used.")
+ return _geo_json_str(this_bus_geo.loc[how_often.idxmax()])
+
+ net.bus.geo = [_add_bus_geo_inner(bus) for bus in net.bus.index]
+
+
+# --- tertiary functions ---------------------------------------------------------------------------
+
+def _float_col_comma_correction(data: dict[str, pd.DataFrame], key: str, col_names: list):
+ for col_name in col_names:
+ data[key][col_name] = pd.to_numeric(data[key][col_name].astype(str).str.replace(
+ ",", "."), errors="coerce")
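A short sketch of what the comma correction does; the sheet key and column name are invented:

```python
import pandas as pd

data = {"Lines": pd.DataFrame({"Length_(km)": ["1,5", "2,75", "n/a"]})}
_float_col_comma_correction(data, "Lines", ["Length_(km)"])
print(data["Lines"]["Length_(km)"].tolist())  # [1.5, 2.75, nan] via errors="coerce"
```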
+
+
+def _get_transformer_voltages(
+ data: dict[str, pd.DataFrame], bus_idx: pd.Series) -> tuple[np.ndarray, np.ndarray]:
+
+ key = "Transformers"
+ vn = data[key].loc[:, [("Voltage_level(kV)", "Primary"),
+ ("Voltage_level(kV)", "Secondary")]].values
+ vn_hv_kv = np.max(vn, axis=1)
+ vn_lv_kv = np.min(vn, axis=1)
+ if is_integer_dtype(list(bus_idx.index.dtypes)[1]):
+ vn_hv_kv = vn_hv_kv.astype(int)
+ vn_lv_kv = vn_lv_kv.astype(int)
+
+ return vn_hv_kv, vn_lv_kv
+
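For illustration, the hv/lv split ignores the column order of the sheet; a sketch with fabricated voltages:

```python
import numpy as np

vn = np.array([[110.0, 380.0], [220.0, 110.0]])  # (primary, secondary) per trafo
vn_hv_kv = np.max(vn, axis=1)  # [380., 220.]
vn_lv_kv = np.min(vn, axis=1)  # [110., 110.]
```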
+
+def _allocate_trafos_to_buses_and_create_buses(
+ net: pandapowerNet, data: dict[str, pd.DataFrame], bus_idx: pd.Series,
+ vn_hv_kv: np.ndarray, vn_lv_kv: np.ndarray,
+ rel_deviation_threshold_for_trafo_bus_creation: float = 0.2,
+ log_rel_vn_deviation: float = 0.12, **kwargs) -> pd.DataFrame:
+ """Provides a DataFrame of data to allocate transformers to the buses according to their
+ location names. If locations of transformers do not exist due to the data of the lines and
+ tielines sheets, additional buses are created. If locations exist but have a far different
+ voltage level than the transformer, either a warning is logged or additional buses are created
+ according to rel_deviation_threshold_for_trafo_bus_creation and log_rel_vn_deviation.
+
+ Parameters
+ ----------
+ net : pandapowerNet
+ pandapower net
+ data : dict[str, pd.DataFrame]
+ dict of DataFrames with the excel sheet data; the "Transformers" sheet is used here
+ bus_idx : pd.Series
+ Series of indices and corresponding location names and voltage levels in the MultiIndex of
+ the Series
+ vn_hv_kv : np.ndarray
+ nominal voltages of the hv side of the transformers
+ vn_lv_kv : np.ndarray
+ Nominal voltages of the lv side of the transformers
+ rel_deviation_threshold_for_trafo_bus_creation : float, optional
+ If the voltage level of a transformer location deviates from the transformer data by
+ more than this relative threshold, additional buses are created; within the threshold,
+ existing buses are reused. By default 0.2
+ log_rel_vn_deviation : float, optional
+ Defines a range below rel_deviation_threshold_for_trafo_bus_creation in which a warning
+ is logged instead of creating additional buses. By default 0.12
+
+ Returns
+ -------
+ pd.DataFrame
+ information on which buses the trafos should be connected to. Columns are
+ ["name", "hv_bus", "lv_bus", "vn_hv_kv", "vn_lv_kv", ...]
+ """
+
+ if rel_deviation_threshold_for_trafo_bus_creation < log_rel_vn_deviation:
+ logger.warning(
+ f"Given parameters violates the ineqation "
+ f"{rel_deviation_threshold_for_trafo_bus_creation=} >= {log_rel_vn_deviation=}. "
+ f"Therefore, rel_deviation_threshold_for_trafo_bus_creation={log_rel_vn_deviation} "
+ "is assumed.")
+ rel_deviation_threshold_for_trafo_bus_creation = log_rel_vn_deviation
+
+ key = "Transformers"
+ bus_location_names = set(net.bus.name)
+ trafo_bus_names = data[key].loc[:, ("Location", "Full Name")]
+ trafo_location_names = _find_trafo_locations(trafo_bus_names, bus_location_names)
+
+ # --- construct DataFrame trafo_connections including all information on trafo allocation to
+ # --- buses
+ empties = -1*np.ones(len(vn_hv_kv), dtype=int)
+ trafo_connections = pd.DataFrame({
+ "name": trafo_location_names,
+ "hv_bus": empties,
+ "lv_bus": empties,
+ "vn_hv_kv": vn_hv_kv,
+ "vn_lv_kv": vn_lv_kv,
+ "vn_hv_kv_next_bus": vn_hv_kv,
+ "vn_lv_kv_next_bus": vn_lv_kv,
+ "hv_rel_deviation": np.zeros(len(vn_hv_kv)),
+ "lv_rel_deviation": np.zeros(len(vn_hv_kv)),
+ })
+ trafo_connections[["hv_bus", "lv_bus"]] = trafo_connections[[
+ "hv_bus", "lv_bus"]].astype(np.int64)
+
+ for side in ["hv", "lv"]:
+ bus_col, trafo_vn_col, next_col, rel_dev_col, has_dev_col = \
+ f"{side}_bus", f"vn_{side}_kv", f"vn_{side}_kv_next_bus", f"{side}_rel_deviation", \
+ f"trafo_{side}_to_bus_deviation"
+ name_vn_series = pd.Series(
+ tuple(zip(trafo_location_names, trafo_connections[trafo_vn_col])))
+ isin = name_vn_series.isin(bus_idx.index)
+ trafo_connections[has_dev_col] = ~isin
+ trafo_connections.loc[isin, bus_col] = bus_idx.loc[name_vn_series.loc[isin]].values
+
+ # --- code to find bus locations with vn deviation
+ next_vn = np.array([bus_idx.loc[tln.name].index.values[
+ (pd.Series(bus_idx.loc[tln.name].index) - getattr(tln, trafo_vn_col)).abs().idxmin(
+ )] for tln in trafo_connections.loc[~isin, ["name", trafo_vn_col]].itertuples()])
+ trafo_connections.loc[~isin, next_col] = next_vn
+ rel_dev = np.abs(next_vn - trafo_connections.loc[~isin, trafo_vn_col].values) / next_vn
+ trafo_connections.loc[~isin, rel_dev_col] = rel_dev
+ trafo_connections.loc[~isin, bus_col] = \
+ bus_idx.loc[list(tuple(zip(trafo_connections.loc[~isin, "name"],
+ trafo_connections.loc[~isin, next_col])))].values
+
+ # --- create buses to avoid too large vn deviations between nodes and transformers
+ need_bus_creation = trafo_connections[rel_dev_col] > \
+ rel_deviation_threshold_for_trafo_bus_creation
+ new_bus_data = pd.DataFrame({
+ "vn_kv": trafo_connections.loc[need_bus_creation, trafo_vn_col].values,
+ "name": trafo_connections.loc[need_bus_creation, "name"].values,
+ "TSO": data[key].loc[need_bus_creation, ("Location", "TSO")].values
+ })
+ new_bus_data_dd = _drop_duplicates_and_join_TSO(new_bus_data)
+ new_bus_idx = create_buses(net, len(new_bus_data_dd), vn_kv=new_bus_data_dd.vn_kv,
+ name=new_bus_data_dd.name, zone=new_bus_data_dd.TSO)
+ trafo_connections.loc[need_bus_creation, bus_col] = net.bus.loc[new_bus_idx, [
+ "name", "vn_kv"]].reset_index().set_index(["name", "vn_kv"]).loc[list(new_bus_data[[
+ "name", "vn_kv"]].itertuples(index=False, name=None))].values
+ trafo_connections.loc[need_bus_creation, next_col] = \
+ trafo_connections.loc[need_bus_creation, trafo_vn_col].values
+ trafo_connections.loc[need_bus_creation, rel_dev_col] = 0
+ trafo_connections.loc[need_bus_creation, has_dev_col] = False
+
+ # --- create buses for trafos that are connected to the same bus at both sides (possible if
+ # --- vn_hv_kv < vn_lv_kv *(1+rel_deviation_threshold_for_trafo_bus_creation) which usually
+ # --- occurs for PSTs only)
+ same_bus_connection = trafo_connections.hv_bus == trafo_connections.lv_bus
+ duplicated_buses = net.bus.loc[trafo_connections.loc[same_bus_connection, "lv_bus"]].copy()
+ duplicated_buses["name"] += " (2)"
+ duplicated_buses.index = list(range(net.bus.index.max()+1,
+ net.bus.index.max()+1+len(duplicated_buses)))
+ trafo_connections.loc[same_bus_connection, "lv_bus"] = duplicated_buses.index
+ net.bus = pd.concat([net.bus, duplicated_buses])
+ if n_add_buses := len(duplicated_buses):
+ tr_names = data[key].loc[trafo_connections.index[same_bus_connection],
+ ("Location", "Full Name")]
+ are_PSTs = tr_names.str.contains("PST")
+ logger.info(f"{n_add_buses} additional buses were created to avoid that transformers are "
+ f"connected to the same bus at both side, hv and lv. Of the causing "
+ f"{len(tr_names)} transformers, {sum(are_PSTs)} contain 'PST' in their name. "
+ f"According to this converter, the power flows over all these transformers will"
+ f" end at the additional buses. Please consider to connect lines with the "
+ f"additional buses, so that the power flow is over the (PST) transformers into "
+ f"the lines.")
+
+ # --- log according to log_rel_vn_deviation
+ for side in ["hv", "lv"]:
+ need_logging = trafo_connections.loc[trafo_connections[has_dev_col],
+ rel_dev_col] > log_rel_vn_deviation
+ if n_need_logging := sum(need_logging):
+ max_dev = trafo_connections[rel_dev_col].max()
+ idx_max_dev = trafo_connections[rel_dev_col].idxmax()
+ logger.warning(
+ f"For {n_need_logging} Transformers ({side} side), only locations were found (orig"
+ f"in are the line and tieline data) that have a higher relative deviation than "
+ f"{log_rel_vn_deviation}. The maximum relative deviation is {max_dev} which "
+ f"results from a Transformer rated voltage of "
+ f"{trafo_connections.at[idx_max_dev, trafo_vn_col]} and a bus "
+ f"rated voltage (taken from Lines/Tielines data sheet) of "
+ f"{trafo_connections.at[idx_max_dev, next_col]}. The best locations were "
+ f"nevertheless applied, due to {rel_deviation_threshold_for_trafo_bus_creation=}")
+
+ assert (trafo_connections.hv_bus > -1).all()
+ assert (trafo_connections.lv_bus > -1).all()
+ assert (trafo_connections.hv_bus != trafo_connections.lv_bus).all()
+
+ return trafo_connections
+
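A worked check of the two thresholds, with purely illustrative numbers:

```python
next_vn, trafo_vn = 380.0, 400.0
rel_dev = abs(next_vn - trafo_vn) / next_vn  # ~0.0526
# rel_dev <= log_rel_vn_deviation (0.12): the found bus is used silently
# 0.12 < rel_dev <= 0.2: the found bus is used, but a warning is logged
# rel_dev > rel_deviation_threshold_for_trafo_bus_creation (0.2): a bus is created
```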
+
+def _find_trafo_locations(trafo_bus_names, bus_location_names):
+ # --- split the (original and lower case) strings at separators to remove parts that
+ # impede identifying the location names
+ trafo_bus_names_expanded = trafo_bus_names.str.split(r"[ ]+|-A[0-9]+|-TD[0-9]+|-PF[0-9]+",
+ expand=True).fillna("").replace(" ", "")
+ trafo_bus_names_expanded_lower = trafo_bus_names.str.lower().str.split(
+ r"[ ]+|-A[0-9]+|-TD[0-9]+|-PF[0-9]+", expand=True).fillna("").replace(" ", "")
+
+ # --- identify impeding parts
+ contains_number = trafo_bus_names_expanded.map(lambda x: any(char.isdigit() for char in x))
+ to_drop = (trafo_bus_names_expanded_lower == "tr") | (trafo_bus_names_expanded_lower == "pst") \
+ | (trafo_bus_names_expanded == "") | (trafo_bus_names_expanded == "/") | (
+ trafo_bus_names_expanded == "LIPST") | (trafo_bus_names_expanded == "EHPST") | (
+ trafo_bus_names_expanded == "TFO") | (trafo_bus_names_expanded_lower == "trafo") | (
+ trafo_bus_names_expanded_lower == "kv") | contains_number
+ trafo_bus_names_expanded[to_drop] = ""
+
+ # --- reconstruct name strings for identification
+ trafo_bus_names_joined = trafo_bus_names_expanded.where(~to_drop).fillna('').agg(
+ ' '.join, axis=1).str.strip()
+ trafo_bus_names_longest_part = trafo_bus_names_expanded.apply(
+ lambda row: max(row, key=len), axis=1)
+ joined_in_buses = trafo_bus_names_joined.isin(bus_location_names)
+ longest_part_in_buses = trafo_bus_names_longest_part.isin(bus_location_names)
+
+ # --- check whether all name strings point at location names of the buses
+ if False: # for easy testing
+ fail = ~(joined_in_buses | longest_part_in_buses)
+ a = pd.concat([trafo_bus_names_joined.loc[fail],
+ trafo_bus_names_longest_part.loc[fail]], axis=1)
+
+ if n_bus_names_not_found := len(joined_in_buses) - sum(joined_in_buses | longest_part_in_buses):
+ raise ValueError(
+ f"For {n_bus_names_not_found} Tranformers, no suitable bus location names were found, "
+ f"i.e. the algorithm did not find a (part) of Transformers-Location-Full Name that fits"
+ " to Substation_1 or Substation_2 data in Lines or Tielines sheet.")
+
+ # --- set the trafo location names and trafo bus indices respectively
+ trafo_location_names = trafo_bus_names_longest_part
+ trafo_location_names.loc[joined_in_buses] = trafo_bus_names_joined
+
+ return trafo_location_names
+
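To see what the splitting does, a hedged example with invented names:

```python
import pandas as pd

names = pd.Series(["Foo TR 380 kV", "Bar-A1 PST"])
parts = names.str.split(r"[ ]+|-A[0-9]+|-TD[0-9]+|-PF[0-9]+", expand=True).fillna("")
# parts such as "TR", "PST", "kV" and anything containing digits are blanked,
# so "Foo" and "Bar" remain as candidates to match against net.bus.name
```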
+
+def _drop_duplicates_and_join_TSO(bus_df: pd.DataFrame) -> pd.DataFrame:
+ bus_df = bus_df.drop_duplicates(ignore_index=True)
+ # just keep one bus per name and vn_kv. If there are multiple buses of different TSOs, join the
+ # TSO strings:
+ bus_df = bus_df.groupby(["name", "vn_kv"], as_index=False).agg({"TSO": lambda x: '/'.join(x)})
+ assert not bus_df.duplicated(["name", "vn_kv"]).any()
+ return bus_df
+
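Behavior sketch with fabricated rows:

```python
import pandas as pd

bus_df = pd.DataFrame({"vn_kv": [380.0, 380.0], "name": ["Foo", "Foo"],
                       "TSO": ["TSO_A", "TSO_B"]})
print(_drop_duplicates_and_join_TSO(bus_df))
#   name  vn_kv          TSO
# 0  Foo  380.0  TSO_A/TSO_B
```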
+
+def _get_float_column(df, col_tuple, fill=0):
+ series = df.loc[:, col_tuple]
+ series.loc[series == "\xa0"] = fill
+ return series.astype(float).fillna(fill)
+
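For instance, applied to a column that contains non-breaking spaces (values invented):

```python
import pandas as pd

df = pd.DataFrame({("Maximum", "Current"): ["1.0", "\xa0", None]})
print(_get_float_column(df, ("Maximum", "Current")).tolist())  # [1.0, 0.0, 0.0]
```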
+
+def _get_bus_idx(net: pandapowerNet) -> pd.Series:
+ return net.bus[["name", "vn_kv"]].rename_axis("index").reset_index().set_index([
+ "name", "vn_kv"])["index"]
+
+
+def get_grid_groups(net: pandapowerNet, **kwargs) -> pd.DataFrame:
+ notravbuses_dict = {"notravbuses": kwargs.pop("notravbuses")} if "notravbuses" in kwargs \
+ else dict()
+ grid_group_buses = list(connected_components(create_nxgraph(net, **kwargs),
+ **notravbuses_dict))
+ grid_groups = pd.DataFrame({"buses": grid_group_buses})
+ grid_groups["n_buses"] = grid_groups["buses"].apply(len)
+ return grid_groups
+
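A usage sketch, assuming net is an existing pandapowerNet:

```python
grid_groups = get_grid_groups(net)
# grid_groups["buses"]: one set of bus indices per connected component
# grid_groups["n_buses"]: size of each component
largest_island = grid_groups.loc[grid_groups["n_buses"].idxmax(), "buses"]
```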
+
+def _lng_lat_to_df(dict_: dict, line_EIC: str, line_name: str) -> pd.DataFrame:
+ return pd.DataFrame([
+ [line_EIC, line_name, "from", "lng", dict_["lng"][0]],
+ [line_EIC, line_name, "to", "lng", dict_["lng"][1]],
+ [line_EIC, line_name, "from", "lat", dict_["lat"][0]],
+ [line_EIC, line_name, "to", "lat", dict_["lat"][1]],
+ ], columns=["EIC_Code", "name", "bus", "geo_dim", "value"])
+
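The long format it produces, with fabricated coordinates:

```python
df = _lng_lat_to_df({"lng": [6.1, 6.2], "lat": [49.6, 49.7]}, "10T-XXX", "Line A")
#   EIC_Code    name   bus geo_dim  value
# 0  10T-XXX  Line A  from     lng    6.1
# 1  10T-XXX  Line A    to     lng    6.2
# 2  10T-XXX  Line A  from     lat   49.6
# 3  10T-XXX  Line A    to     lat   49.7
```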
+
+def _fill_geo_at_one_sided_branches_without_geo_extent(net: pandapowerNet):
+
+ def _check_geo_availability(net: pandapowerNet) -> dict[str, Union[pd.Index, int]]:
+ av = dict() # availability of geodata
+ av["bus_with_geo"] = net.bus.index[~net.bus.geo.isnull()]
+ av["lines_fbw_tbwo"] = net.line.index[net.line.from_bus.isin(av["bus_with_geo"]) &
+ (~net.line.to_bus.isin(av["bus_with_geo"]))]
+ av["lines_fbwo_tbw"] = net.line.index[(~net.line.from_bus.isin(av["bus_with_geo"])) &
+ net.line.to_bus.isin(av["bus_with_geo"])]
+ av["trafos_hvbw_lvbwo"] = net.trafo.index[net.trafo.hv_bus.isin(av["bus_with_geo"]) &
+ (~net.trafo.lv_bus.isin(av["bus_with_geo"]))]
+ av["trafos_hvbwo_lvbw"] = net.trafo.index[(~net.trafo.hv_bus.isin(av["bus_with_geo"])) &
+ net.trafo.lv_bus.isin(av["bus_with_geo"])]
+ av["n_lines_one_side_geo"] = len(av["lines_fbw_tbwo"])+len(av["lines_fbwo_tbw"])
+ return av
+
+ geo_avail = _check_geo_availability(net)
+ while geo_avail["n_lines_one_side_geo"]:
+
+ # copy available geodata to the other end of branches where geodata are missing
+ for et, bus_w_geo, bus_wo_geo, idx_key in zip(
+ ["line", "line", "trafo", "trafo"],
+ ["to_bus", "from_bus", "lv_bus", "hv_bus"],
+ ["from_bus", "to_bus", "hv_bus", "lv_bus"],
+ ["lines_fbwo_tbw", "lines_fbw_tbwo", "trafos_hvbwo_lvbw", "trafos_hvbw_lvbwo"]):
+ net.bus.loc[net[et].loc[geo_avail[idx_key], bus_wo_geo].values, "geo"] = \
+ net.bus.loc[net[et].loc[geo_avail[idx_key], bus_w_geo].values, "geo"].values
+ geo_avail = _check_geo_availability(net)
+
+ set_line_geodata_from_bus_geodata(net)
+
+
+def _multi_str_repl(st: str, repl: list[tuple]) -> str:
+ for (old, new) in repl:
+ st = st.replace(old, new)
+ return st
+
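With the loop fixed to apply all replacements in order, e.g.:

```python
print(_multi_str_repl("a-b c", [("-", "_"), (" ", "")]))  # "a_bc"
```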
+
+if __name__ == "__main__":
+ from pathlib import Path
+ import os
+ import pandapower as pp
+
+ home = str(Path.home())
+ jao_data_folder = os.path.join(home, "Documents", "JAO Static Grid Model")
+
+ release5 = os.path.join(jao_data_folder, "20240329_Core Static Grid Model – 5th release")
+ excel_file_path = os.path.join(release5, "20240329_Core Static Grid Model_public.xlsx")
+ html_file_path = os.path.join(release5, "20240329_Core Static Grid Model Map_public",
+ "2024-03-18_Core_SGM_publication.html")
+
+ release6 = os.path.join(jao_data_folder, "202409_Core Static Grid Mode_6th release")
+ excel_file_path = os.path.join(release6, "20240916_Core Static Grid Model_for publication.xlsx")
+ html_file_path = os.path.join(release6, "2024-09-13_Core_SGM_publication_files",
+ "2024-09-13_Core_SGM_publication.html")
+
+ pp_net_json_file = os.path.join(home, "desktop", "jao_grid.json")
+
+ if 1: # read from original data
+ net = from_jao(excel_file_path, html_file_path, True, drop_grid_groups_islands=True)
+ pp.to_json(net, pp_net_json_file)
+ else: # load net from already converted and stored net
+ net = pp.from_json(pp_net_json_file)
+
+ print(net)
+ grid_groups = get_grid_groups(net)
+ print(grid_groups)
+
+ _fill_geo_at_one_sided_branches_without_geo_extent(net)
diff --git a/pandapower/converter/pandamodels/from_pm.py b/pandapower/converter/pandamodels/from_pm.py
index acb22a768..a66e9d25d 100644
--- a/pandapower/converter/pandamodels/from_pm.py
+++ b/pandapower/converter/pandamodels/from_pm.py
@@ -38,11 +38,6 @@ def read_pm_results_to_net(net, ppc, ppci, result_pm):
_extract_results(net, result)
else:
neti = deepcopy(net)
- removed_keys = set(net.keys()) - pp_elements(res_elements=True) - \
- {"_options", "_is_elements", "_pd2ppc_lookups", "res_bus", "res_switch"} | \
- {"measurement"}
- for rk in removed_keys:
- neti.pop(rk)
for tp, ri in result.items():
add_time_series_data_to_net(neti, net.controller, tp)
_extract_results(neti, ri)
diff --git a/pandapower/converter/pandamodels/to_pm.py b/pandapower/converter/pandamodels/to_pm.py
index 81add6fcc..5787a1c20 100644
--- a/pandapower/converter/pandamodels/to_pm.py
+++ b/pandapower/converter/pandamodels/to_pm.py
@@ -151,6 +151,7 @@ def convert_to_pm_structure(net, opf_flow_lim="S", from_time_step=None, to_time_
ppci = build_ne_branch(net, ppci)
net["_ppc_opf"] = ppci
pm = ppc_to_pm(net, ppci)
+ # TODO: somewhere around here, RATE_A should be converted to 0., because only PowerModels uses 0 as "no limit" (pypower opf converts the zero to inf)
pm = add_pm_options(pm, net)
pm = add_params_to_pm(net, pm)
if from_time_step is not None and to_time_step is not None:
diff --git a/pandapower/converter/powerfactory/pp_import_functions.py b/pandapower/converter/powerfactory/pp_import_functions.py
index 065cd3364..8db54c564 100644
--- a/pandapower/converter/powerfactory/pp_import_functions.py
+++ b/pandapower/converter/powerfactory/pp_import_functions.py
@@ -3,14 +3,17 @@
import numbers
import re
from itertools import combinations
+from typing import Literal, Optional, Union
+import geojson
import networkx as nx
-
import numpy as np
+from pandas import DataFrame
+
import pandapower as pp
+from pandapower.results import reset_results
from pandapower.auxiliary import ADict
import pandapower.control as control
-from pandas import DataFrame, Series
try:
import pandaplan.core.pplog as logging
@@ -19,21 +22,31 @@
logger = logging.getLogger(__name__)
-
-# make wrapper for GetAttribute
-def ga(element, attr):
- return element.GetAttribute(attr)
+# Define global variables
+line_dict = {}
+trafo_dict = {}
+switch_dict = {}
+bus_dict = {}
+grf_map = {}
# import network to pandapower:
-def from_pf(dict_net, pv_as_slack=True, pf_variable_p_loads='plini', pf_variable_p_gen='pgini',
- flag_graphics='GPS', tap_opt="nntap", export_controller=True, handle_us="Deactivate",
- max_iter=None, is_unbalanced=False, create_sections=True):
- global line_dict
+def from_pf(
+ dict_net,
+ pv_as_slack=True,
+ pf_variable_p_loads='plini',
+ pf_variable_p_gen='pgini',
+ flag_graphics: Literal["GPS", "no geodata"] = 'GPS',
+ tap_opt="nntap",
+ export_controller=True,
+ handle_us: Literal["Deactivate", "Drop", "Nothing"] = "Deactivate",
+ max_iter=None,
+ is_unbalanced=False,
+ create_sections=True
+):
+ global line_dict, trafo_dict, switch_dict, bus_dict, grf_map
line_dict = {}
- global trafo_dict
trafo_dict = {}
- global switch_dict
switch_dict = {}
logger.debug("__name__: %s" % __name__)
logger.debug('started from_pf')
@@ -45,10 +58,8 @@ def from_pf(dict_net, pv_as_slack=True, pf_variable_p_loads='plini', pf_variable
grid_name = dict_net['ElmNet'].loc_name
base_sn_mva = dict_net['global_parameters']['base_sn_mva']
net = pp.create_empty_network(grid_name, sn_mva=base_sn_mva)
- net['bus_geodata'] = DataFrame(columns=['x', 'y'])
- net['line_geodata'] = DataFrame(columns=['coords'])
- pp.results.reset_results(net, mode="pf_3ph")
+ reset_results(net, mode="pf_3ph")
if max_iter is not None:
pp.set_user_pf_options(net, max_iteration=max_iter)
logger.info('creating grid %s' % grid_name)
@@ -57,9 +68,7 @@ def from_pf(dict_net, pv_as_slack=True, pf_variable_p_loads='plini', pf_variable
logger.debug('creating buses')
# create buses:
- global bus_dict
bus_dict = {}
- global grf_map
grf_map = dict_net.get('graphics', {})
logger.debug('the graphic mapping is: %s' % grf_map)
@@ -342,12 +351,12 @@ def add_additional_attributes(item, net, element, element_id, attr_list=None, at
obj = item
for a in attr.split('.'):
if hasattr(obj, 'HasAttribute') and obj.HasAttribute(a):
- obj = ga(obj, a)
+ obj = obj.GetAttribute(a)
if obj is not None and isinstance(obj, str):
net[element].loc[element_id, attr_dict[attr]] = obj
elif item.HasAttribute(attr):
- chr_name = ga(item, attr)
+ chr_name = item.GetAttribute(attr)
if chr_name is not None:
if isinstance(chr_name, (str, numbers.Number)):
net[element].loc[element_id, attr_dict[attr]] = chr_name
@@ -362,24 +371,25 @@ def add_additional_attributes(item, net, element, element_id, attr_list=None, at
def create_bus(net, item, flag_graphics, is_unbalanced):
# add geo data
if flag_graphics == 'GPS':
- x = ga(item, 'e:GPSlon')
- y = ga(item, 'e:GPSlat')
+ x = item.GetAttribute('e:GPSlon')
+ y = item.GetAttribute('e:GPSlat')
elif flag_graphics == 'graphic objects':
graphic_object = get_graphic_object(item)
if graphic_object:
- x = ga(graphic_object, 'rCenterX')
- y = ga(graphic_object, 'rCenterY')
+ x = graphic_object.GetAttribute('rCenterX')
+ y = graphic_object.GetAttribute('rCenterY')
# add gr coord data
else:
x, y = 0, 0
else:
x, y = 0, 0
- # only values > 0+-1e-3 are entered into the bus_geodata
- if x > 1e-3 or y > 1e-3:
- geodata = (x, y)
- else:
- geodata = None
+ # Commented out because geojson is set up to do the precision handling
+ # # only values > 0+-1e-3 are entered into the bus.geo
+ # if x > 1e-3 or y > 1e-3:
+ # geodata = (x, y)
+ # else:
+ # geodata = None
usage = ["b", "m", "n"]
params = {
@@ -387,7 +397,7 @@ def create_bus(net, item, flag_graphics, is_unbalanced):
'vn_kv': item.uknom,
'in_service': not bool(item.outserv),
'type': usage[item.iUsage],
- 'geodata': geodata
+ 'geodata': (x, y),
}
system_type = {0: "ac", 1: "dc", 2: "ac/bi"}[item.systype]
@@ -447,12 +457,12 @@ def get_pf_bus_results(net, item, bid, is_unbalanced, system_type):
if is_unbalanced:
bus_type = "res_bus_3ph"
result_variables = {
- "pf_vm_a_pu": "m:u:A",
- "pf_va_a_degree": "m:phiu:A",
- "pf_vm_b_pu": "m:u:B",
- "pf_va_b_degree": "m:phiu:B",
- "pf_vm_c_pu": "m:u:C",
- "pf_va_c_degree": "m:phiu:C",
+ "pf_vm_a_pu": "m:u:A",
+ "pf_va_a_degree": "m:phiu:A",
+ "pf_vm_b_pu": "m:u:B",
+ "pf_va_b_degree": "m:phiu:B",
+ "pf_vm_c_pu": "m:u:C",
+ "pf_va_c_degree": "m:phiu:C",
}
elif system_type == "ac":
bus_type = "res_bus"
@@ -467,7 +477,7 @@ def get_pf_bus_results(net, item, bid, is_unbalanced, system_type):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(0):
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
# dc bus voltage can be negative:
net[bus_type].at[bid, res_var_pp] = np.abs(res) if "vm_pu" in res_var_pp else res
@@ -475,7 +485,7 @@ def get_pf_bus_results(net, item, bid, is_unbalanced, system_type):
# # This one deletes all the results :(
# # Don't use it
# def find_bus_index_in_net(item, net=None):
-# foreign_key = int(ga(item, 'for_name'))
+# foreign_key = int(item.GetAttribute('for_name'))
# return foreign_key
@@ -484,32 +494,32 @@ def get_pf_bus_results(net, item, bid, is_unbalanced, system_type):
# def find_bus_index_in_net(item, net):
# usage = ["b", "m", "n"]
# # to be sure that the bus is the correct one
-# name = ga(item, 'loc_name')
-# bus_type = usage[ga(item, 'iUsage')]
+# name = item.GetAttribute('loc_name')
+# bus_type = usage[item.GetAttribute('iUsage')]
# logger.debug('looking for bus <%s> in net' % name)
#
# if item.HasAttribute('cpSubstat'):
-# substat = ga(item, 'cpSubstat')
+# substat = item.GetAttribute('cpSubstat')
# if substat is not None:
-# descr = ga(substat, 'loc_name')
+# descr = substat.GetAttribute('loc_name')
# logger.debug('bus <%s> has substat, descr is <%s>' % (name, descr))
# else:
# # omg so ugly :(
-# descr = ga(item, 'desc')
+# descr = item.GetAttribute('desc')
# descr = descr[0] if len(descr) > 0 else ""
# logger.debug('substat is none, descr of bus <%s> is <%s>' % (name, descr))
# else:
-# descr = ga(item, 'desc')
+# descr = item.GetAttribute('desc')
# descr = descr[0] if len(descr) > 0 else ""
# logger.debug('no attribute "substat", descr of bus <%s> is <%s>' % (name, descr))
#
# try:
-# zone = ga(item, 'Grid')
-# zone_name = ga(zone, 'loc_name').split('.ElmNet')[0]
+# zone = item.GetAttribute('Grid')
+# zone_name = zone.GetAttribute('loc_name').split('.ElmNet')[0]
# logger.debug('zone "Grid" found: <%s>' % zone_name)
# except:
-# zone = ga(item, 'cpGrid')
-# zone_name = ga(zone, 'loc_name').split('.ElmNet')[0]
+# zone = item.GetAttribute('cpGrid')
+# zone_name = zone.GetAttribute('loc_name').split('.ElmNet')[0]
# logger.debug('zone "cpGrid" found: <%s>' % zone_name)
#
# temp_df_a = net.bus[net.bus.zone == zone_name]
@@ -563,12 +573,12 @@ def get_connection_nodes(net, item, num_nodes):
item, pf_class))
if pf_class == "ElmTr2":
- v.append(ga(item, 't:utrn_h'))
- v.append(ga(item, 't:utrn_l'))
+ v.append(item.GetAttribute('t:utrn_h'))
+ v.append(item.GetAttribute('t:utrn_l'))
elif pf_class == "ElmTr3":
- v.append(ga(item, 't:utrn3_h'))
- v.append(ga(item, 't:utrn3_m'))
- v.append(ga(item, 't:utrn3_l'))
+ v.append(item.GetAttribute('t:utrn3_h'))
+ v.append(item.GetAttribute('t:utrn3_m'))
+ v.append(item.GetAttribute('t:utrn3_l'))
else:
v = [net[table].vn_kv.at[existing_bus] for _ in buses]
@@ -625,23 +635,17 @@ def create_connection_switches(net, item, number_switches, et, buses, elements):
def get_coords_from_buses(net, from_bus, to_bus, **kwargs):
- coords = []
- if from_bus in net.bus_geodata.index:
- x1, y1 = net.bus_geodata.loc[from_bus, ['x', 'y']]
- has_coords = True
- else:
- x1, y1 = np.nan, np.nan
- has_coords = False
+ coords: list[tuple[float, float]] = []
+ from_geo: Optional[str] = None
+ to_geo: Optional[str] = None
+ if from_bus in net.bus.index:
+ from_geo = net.bus.at[from_bus, 'geo']
- if to_bus in net.bus_geodata.index:
- x2, y2 = net.bus_geodata.loc[to_bus, ['x', 'y']]
- has_coords = True
- else:
- x2, y2 = np.nan, np.nan
- has_coords = False
+ if to_bus in net.bus.index:
+ to_geo = net.bus.at[to_bus, 'geo']
- if has_coords:
- coords = [[x1, y1], [x2, y2]]
+ if from_geo and to_geo:
+ coords = [next(geojson.utils.coords(geojson.loads(from_geo))),
+ next(geojson.utils.coords(geojson.loads(to_geo)))]
logger.debug('got coords from buses: %s' % coords)
else:
logger.debug('no coords for line between buses %d and %d' % (from_bus, to_bus))
@@ -656,7 +660,7 @@ def get_coords_from_item(item):
c = tuple((x, y) for [y, x] in coords)
except ValueError:
try:
- c = tuple((x, y) for [y, x, z] in coords)
+ c = tuple((x, y, z) for [y, x, z] in coords)
except ValueError:
c = []
return c
@@ -684,7 +688,6 @@ def get_coords_from_grf_object(item):
if len(coords) == 0:
coords = [[graphic_object.rCenterX, graphic_object.rCenterY]] * 2
logger.debug('extracted line coords from graphic object: %s' % coords)
- # net.line_geodata.loc[lid, 'coords'] = coords
else:
coords = []
@@ -757,13 +760,21 @@ def create_line(net, item, flag_graphics, create_sections, is_unbalanced):
logger.debug('line <%s> created' % params['name'])
-def point_len(p1, p2):
+def point_len(
+ p1: tuple[Union[float, int], Union[float, int]],
+ p2: tuple[Union[float, int], Union[float, int]]) -> float:
+ """
+ Calculate distance between p1 and p2
+ """
x1, y1 = p1
x2, y2 = p2
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
-def calc_len_coords(coords):
+def calc_len_coords(coords: list[tuple[Union[float, int], Union[float, int]]]) -> float:
+ """
+ Calculate the sum of point distances in list of coords
+ """
tot_len = 0
for i in range(len(coords) - 1):
tot_len += point_len(coords[i], coords[i + 1])
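A quick sanity check of the two distance helpers (illustrative values):

```python
assert point_len((0.0, 0.0), (3.0, 4.0)) == 5.0
assert calc_len_coords([(0.0, 0.0), (3.0, 4.0), (3.0, 5.0)]) == 6.0
```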
@@ -859,6 +870,11 @@ def get_section_coords(coords, sec_len, start_len, scale_factor):
def segment_buses(net, bus1, bus2, num_sections, line_name): # , sec_len, start_len, coords):
+ """
+ Splits the line between bus1 and bus2 so that num_sections lines are created.
+ Yields the start and end bus for each line segment,
+ e.g. bus1, a, a, bus2 for num_sections = 2.
+ """
yield bus1
m = 1
# if coords:
@@ -874,11 +890,13 @@ def segment_buses(net, bus1, bus2, num_sections, line_name): # , sec_len, start
# split_len = 0
while m < num_sections:
- bus_name = "%s (Muff %u)" % (line_name, m)
+ bus_name = f"{line_name} (Muff {m})"
vn_kv = net.bus.at[bus1, "vn_kv"]
zone = net.bus.at[bus1, "zone"]
- k = pp.create_bus(net, name=bus_name, type='ls', vn_kv=vn_kv, zone=zone)
+ bus = pp.create_bus(net, name=bus_name, type='ls', vn_kv=vn_kv, zone=zone)
+ # TODO: implement coords for segmentation buses.
+ # Handle coords if line has multiple coords.
# if coords:
# split_len += sec_len[m - 1] * scale_factor
#
@@ -888,9 +906,9 @@ def segment_buses(net, bus1, bus2, num_sections, line_name): # , sec_len, start
# logger.warning('bus %d has 0 coords, bus1: %d, bus2: %d' % k, bus1, bus2)
if "description" in net.bus:
- net.bus.at[k, "description"] = u""
- yield k
- yield k
+ net.bus.at[bus, "description"] = ""
+ yield bus
+ yield bus
m += 1
else:
yield bus2
@@ -902,7 +920,7 @@ def create_line_sections(net, item_list, line, bus1, bus2, coords, parallel, is_
item_list.sort(key=lambda x: x.index) # to ensure they are in correct order
if line.HasResults(-1): # -1 for 'c' results (whatever that is...)
- line_loading = ga(line, 'c:loading')
+ line_loading = line.GetAttribute('c:loading')
else:
line_loading = np.nan
@@ -928,10 +946,10 @@ def create_line_sections(net, item_list, line, bus1, bus2, coords, parallel, is_
scaling_factor = sum(sec_len) / calc_len_coords(coords)
sec_coords = get_section_coords(coords, sec_len=item.dline, start_len=item.rellen,
scale_factor=scaling_factor)
- net.line_geodata.loc[sid, 'coords'] = sec_coords
+ net.line.loc[sid, 'geo'] = geojson.dumps(geojson.LineString(sec_coords))
# p1 = sec_coords[0]
# p2 = sec_coords[-1]
- net.bus_geodata.loc[bus2, ['x', 'y']] = sec_coords[-1]
+ net.bus.at[bus2, 'geo'] = geojson.dumps(geojson.Point(sec_coords[-1]))
except ZeroDivisionError:
logger.warning("Could not generate geodata for line !!")
@@ -1084,15 +1102,15 @@ def get_pf_line_results(net, item, lid, is_unbalanced, ac):
if is_unbalanced:
line_type = "res_line_3ph"
result_variables = {
- "pf_i_a_from_ka": "m:I:bus1:A",
- "pf_i_a_to_ka": "m:I:bus2:A",
- "pf_i_b_from_ka": "m:I:bus1:B",
- "pf_i_b_to_ka": "m:I:bus2:B",
- "pf_i_c_from_ka": "m:I:bus1:C",
- "pf_i_c_to_ka": "m:I:bus2:C",
- "pf_i_n_from_ka": "m:I0x3:bus1",
- "pf_i_n_to_ka": "m:I0x3:bus2",
- "pf_loading_percent": "c:loading",
+ "pf_i_a_from_ka": "m:I:bus1:A",
+ "pf_i_a_to_ka": "m:I:bus2:A",
+ "pf_i_b_from_ka": "m:I:bus1:B",
+ "pf_i_b_to_ka": "m:I:bus2:B",
+ "pf_i_c_from_ka": "m:I:bus1:C",
+ "pf_i_c_to_ka": "m:I:bus2:C",
+ "pf_i_n_from_ka": "m:I0x3:bus1",
+ "pf_i_n_to_ka": "m:I0x3:bus2",
+ "pf_loading_percent": "c:loading",
}
elif ac:
line_type = "res_line"
@@ -1104,7 +1122,7 @@ def get_pf_line_results(net, item, lid, is_unbalanced, ac):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(-1): # -1 for 'c' results (whatever that is...)
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
net[line_type].at[lid, res_var_pp] = res
@@ -1132,14 +1150,14 @@ def create_line_type(net, item, cable_in_air=False):
type_data = {
"r_ohm_per_km": item.rline,
"x_ohm_per_km": item.xline,
- "c_nf_per_km": item.cline*item.frnom/50 * 1e3, # internal unit for C in PF is uF
+ "c_nf_per_km": item.cline * item.frnom / 50 * 1e3, # internal unit for C in PF is uF
"q_mm2": item.qurs,
"max_i_ka": max_i_ka if max_i_ka != 0 else 1e-3,
"endtemp_degree": item.rtemp,
"type": line_or_cable,
"r0_ohm_per_km": item.rline0,
"x0_ohm_per_km": item.xline0,
- "c0_nf_per_km": item.cline0*item.frnom/50 * 1e3, # internal unit for C in PF is uF
+ "c0_nf_per_km": item.cline0 * item.frnom / 50 * 1e3, # internal unit for C in PF is uF
"alpha": item.alpha
}
pp.create_std_type(net, type_data, name, "line")
@@ -1236,8 +1254,8 @@ def create_ext_net(net, item, pv_as_slack, is_unbalanced):
# if item.HasResults(0): # 'm' results...
# # sm:r, sm:i don't work...
# logger.debug('<%s> has results' % name)
- # net['res_' + elm].at[xid, "pf_p"] = ga(item, 'm:P:bus1')
- # net['res_' + elm].at[xid, "pf_q"] = ga(item, 'm:Q:bus1')
+ # net['res_' + elm].at[xid, "pf_p"] = item.GetAttribute('m:P:bus1')
+ # net['res_' + elm].at[xid, "pf_q"] = item.GetAttribute('m:Q:bus1')
# else:
# net['res_' + elm].at[xid, "pf_p"] = np.nan
# net['res_' + elm].at[xid, "pf_q"] = np.nan
@@ -1257,12 +1275,12 @@ def get_pf_ext_grid_results(net, item, xid, is_unbalanced):
if is_unbalanced:
ext_grid_type = "res_ext_grid_3ph"
result_variables = {
- "pf_p_a": "m:P:bus1:A",
- "pf_q_a": "m:Q:bus1:A",
- "pf_p_b": "m:P:bus1:B",
- "pf_q_b": "m:Q:bus1:B",
- "pf_p_c": "m:P:bus1:C",
- "pf_q_c": "m:Q:bus1:C",
+ "pf_p_a": "m:P:bus1:A",
+ "pf_q_a": "m:Q:bus1:A",
+ "pf_p_b": "m:P:bus1:B",
+ "pf_q_b": "m:Q:bus1:B",
+ "pf_p_c": "m:P:bus1:C",
+ "pf_q_c": "m:Q:bus1:C",
}
else:
ext_grid_type = "res_ext_grid"
@@ -1274,7 +1292,7 @@ def get_pf_ext_grid_results(net, item, xid, is_unbalanced):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(0):
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
net[ext_grid_type].at[xid, res_var_pp] = res
@@ -1364,11 +1382,11 @@ def ask_load_params(item, pf_variable_p_loads, dict_net, variables):
if pf_variable_p_loads == 'm:P:bus1' and not item.HasResults(0):
raise RuntimeError('load %s does not have results and is ignored' % item.loc_name)
if 'p_mw' in variables:
- params.p_mw = ga(item, pf_variable_p_loads) * multiplier
+ params.p_mw = item.GetAttribute(pf_variable_p_loads) * multiplier
if 'q_mvar' in variables:
- params.q_mvar = ga(item, map_power_var(pf_variable_p_loads, 'q')) * multiplier
+ params.q_mvar = item.GetAttribute(map_power_var(pf_variable_p_loads, 'q')) * multiplier
if 'sn_mva' in variables:
- params.sn_mva = ga(item, map_power_var(pf_variable_p_loads, 's')) * multiplier
+ params.sn_mva = item.GetAttribute(map_power_var(pf_variable_p_loads, 's')) * multiplier
kap = -1 if item.pf_recap == 1 else 1
try:
@@ -1396,17 +1414,17 @@ def ask_unbalanced_load_params(item, pf_variable_p_loads, dict_net, variables):
if pf_variable_p_loads == 'm:P:bus1' and not item.HasResults(0):
raise RuntimeError('load %s does not have results and is ignored' % item.loc_name)
if 'p_mw' in variables:
- params.p_a_mw = ga(item, pf_variable_p_loads + "r")
- params.p_b_mw = ga(item, pf_variable_p_loads + "s")
- params.p_c_mw = ga(item, pf_variable_p_loads + "t")
+ params.p_a_mw = item.GetAttribute(pf_variable_p_loads + "r")
+ params.p_b_mw = item.GetAttribute(pf_variable_p_loads + "s")
+ params.p_c_mw = item.GetAttribute(pf_variable_p_loads + "t")
if 'q_mvar' in variables:
- params.q_a_mvar = ga(item, map_power_var(pf_variable_p_loads, 'q') + "r")
- params.q_b_mvar = ga(item, map_power_var(pf_variable_p_loads, 'q') + "s")
- params.q_c_mvar = ga(item, map_power_var(pf_variable_p_loads, 'q') + "t")
+ params.q_a_mvar = item.GetAttribute(map_power_var(pf_variable_p_loads, 'q') + "r")
+ params.q_b_mvar = item.GetAttribute(map_power_var(pf_variable_p_loads, 'q') + "s")
+ params.q_c_mvar = item.GetAttribute(map_power_var(pf_variable_p_loads, 'q') + "t")
if 'sn_mva' in variables:
- params.sn_a_mva = ga(item, map_power_var(pf_variable_p_loads, 's') + "r")
- params.sn_b_mva = ga(item, map_power_var(pf_variable_p_loads, 's') + "s")
- params.sn_c_mva = ga(item, map_power_var(pf_variable_p_loads, 's') + "t")
+ params.sn_a_mva = item.GetAttribute(map_power_var(pf_variable_p_loads, 's') + "r")
+ params.sn_b_mva = item.GetAttribute(map_power_var(pf_variable_p_loads, 's') + "s")
+ params.sn_c_mva = item.GetAttribute(map_power_var(pf_variable_p_loads, 's') + "t")
kap = -1 if item.pf_recap == 1 else 1
try:
@@ -1589,9 +1607,9 @@ def split_line_add_bus_old(net, item, parent):
raise RuntimeError('incorrect length for section %s: %.3f' % (sec, sec_len_b))
# get coords
- if sid in net.line_geodata.index.values:
+ if isinstance(net.line.at[sid, 'geo'], str):
logger.debug('line has coords')
- coords = net.line_geodata.at[sid, 'coords']
+ coords = list(geojson.utils.coords(geojson.loads(net.line.at[sid, 'geo'])))
logger.debug('old geodata of line %d: %s' % (sid, coords))
# get coords for 2 split lines
@@ -1619,12 +1637,17 @@ def split_line_add_bus_old(net, item, parent):
logger.debug('created new bus in net: %s' % net.bus.loc[bus])
# create new line
- lid = pp.create_line(net, from_bus=bus, to_bus=net.line.at[sid, 'to_bus'],
- length_km=sec_len_b,
- std_type=net.line.at[sid, 'std_type'],
- name=net.line.at[sid, 'name'], df=net.line.at[sid, 'df'])
+ lid = pp.create_line(
+ net,
+ from_bus=bus,
+ to_bus=net.line.at[sid, 'to_bus'],
+ length_km=sec_len_b,
+ std_type=net.line.at[sid, 'std_type'],
+ name=net.line.at[sid, 'name'],
+ df=net.line.at[sid, 'df'],
+ geodata=coords_b
+ )
net.line.at[lid, 'section'] = net.line.at[sid, 'section']
- net.line_geodata.loc[lid, 'coords'] = coords_b
if not net.line.loc[sid, 'section_idx']:
net.line.loc[sid, 'section_idx'] = 0
@@ -1635,7 +1658,7 @@ def split_line_add_bus_old(net, item, parent):
net.line.at[sid, 'to_bus'] = bus
net.line.at[sid, 'length_km'] = sec_len_a
- net.line_geodata.loc[sid, 'coords'] = coords_a
+ net.line.at[sid, 'geo'] = geojson.dumps(geojson.LineString(coords_a))
logger.debug('changed: %s' % net.line.loc[sid])
else:
# no new bus/line are created: take the to_bus
@@ -1656,7 +1679,7 @@ def create_load(net, item, pf_variable_p_loads, dict_net, is_unbalanced):
ask = ask_unbalanced_load_params if is_unbalanced else ask_load_params
if load_class == 'ElmLodlv':
- # if bool(ga(item, 'e:cHasPartLod')):
+ # if bool(item.GetAttribute('e:cHasPartLod')):
# logger.info('ElmLodlv %s has partial loads - skip' % item.loc_name)
# part_lods = item.GetContents('*.ElmLodlvp')
# logger.debug('%s' % part_lods)
@@ -1691,8 +1714,8 @@ def create_load(net, item, pf_variable_p_loads, dict_net, is_unbalanced):
i = 0
z = 0
for cc, ee in zip(("aP", "bP", "cP"), ("kpu0", "kpu1", "kpu")):
- c = ga(load_type, cc)
- e = ga(load_type, ee)
+ c = load_type.GetAttribute(cc)
+ e = load_type.GetAttribute(ee)
if e == 1:
i += 100 * c
elif e == 2:
@@ -1778,8 +1801,8 @@ def create_load(net, item, pf_variable_p_loads, dict_net, is_unbalanced):
# if not is_unbalanced:
# if item.HasResults(0): # 'm' results...
# logger.debug('<%s> has results' % params.name)
- # net["res_load"].at[ld, "pf_p"] = ga(item, 'm:P:bus1')
- # net["res_load"].at[ld, "pf_q"] = ga(item, 'm:Q:bus1')
+ # net["res_load"].at[ld, "pf_p"] = item.GetAttribute('m:P:bus1')
+ # net["res_load"].at[ld, "pf_q"] = item.GetAttribute('m:Q:bus1')
# else:
# net["res_load"].at[ld, "pf_p"] = np.nan
# net["res_load"].at[ld, "pf_q"] = np.nan
@@ -1810,7 +1833,7 @@ def get_pf_load_results(net, item, ld, is_unbalanced):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(0):
- res = ga(item, res_var_pf) * get_power_multiplier(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf) * get_power_multiplier(item, res_var_pf)
net[load_type].at[ld, res_var_pp] = res
@@ -1820,11 +1843,11 @@ def ask_gen_params(item, pf_variable_p_gen, *vars):
if pf_variable_p_gen == 'm:P:bus1' and not item.HasResults(0):
raise RuntimeError('generator %s does not have results and is ignored' % item.loc_name)
if 'p_mw' in vars:
- params.p_mw = ga(item, pf_variable_p_gen) * multiplier
+ params.p_mw = item.GetAttribute(pf_variable_p_gen) * multiplier
if 'q_mvar' in vars:
- params.q_mvar = ga(item, map_power_var(pf_variable_p_gen, 'q')) * multiplier
+ params.q_mvar = item.GetAttribute(map_power_var(pf_variable_p_gen, 'q')) * multiplier
if 'sn_mva' in vars:
- params.sn_mva = ga(item, map_power_var(pf_variable_p_gen, 'sn')) * multiplier
+ params.sn_mva = item.GetAttribute(map_power_var(pf_variable_p_gen, 'sn')) * multiplier
params.scaling = item.scale0 if pf_variable_p_gen == 'pgini' else 1
# p_mw = p_mw, q_mvar = q_mvar, scaling = scaling
@@ -1840,25 +1863,25 @@ def ask_unbalanced_sgen_params(item, pf_variable_p_sgen, *vars):
technology = item.phtech
if technology in [0, 1]: # (0-1: 3PH)
if 'p_mw' in vars:
- params.p_a_mw = ga(item, pf_variable_p_sgen) / 3
- params.p_b_mw = ga(item, pf_variable_p_sgen) / 3
- params.p_c_mw = ga(item, pf_variable_p_sgen) / 3
+ params.p_a_mw = item.GetAttribute(pf_variable_p_sgen) / 3
+ params.p_b_mw = item.GetAttribute(pf_variable_p_sgen) / 3
+ params.p_c_mw = item.GetAttribute(pf_variable_p_sgen) / 3
if 'q_mvar' in vars:
- params.q_a_mvar = ga(item, map_power_var(pf_variable_p_sgen, 'q')) / 3
- params.q_b_mvar = ga(item, map_power_var(pf_variable_p_sgen, 'q')) / 3
- params.q_c_mvar = ga(item, map_power_var(pf_variable_p_sgen, 'q')) / 3
+ params.q_a_mvar = item.GetAttribute(map_power_var(pf_variable_p_sgen, 'q')) / 3
+ params.q_b_mvar = item.GetAttribute(map_power_var(pf_variable_p_sgen, 'q')) / 3
+ params.q_c_mvar = item.GetAttribute(map_power_var(pf_variable_p_sgen, 'q')) / 3
elif technology in [2, 3, 4]: # (2-4: 1PH)
if 'p_mw' in vars:
- params.p_a_mw = ga(item, pf_variable_p_sgen)
+ params.p_a_mw = item.GetAttribute(pf_variable_p_sgen)
params.p_b_mw = 0
params.p_c_mw = 0
if 'q_mvar' in vars:
- params.q_a_mvar = ga(item, map_power_var(pf_variable_p_sgen, 'q'))
+ params.q_a_mvar = item.GetAttribute(map_power_var(pf_variable_p_sgen, 'q'))
params.q_b_mvar = 0
params.q_c_mvar = 0
if 'sn_mva' in vars:
- params.sn_mva = ga(item, map_power_var(pf_variable_p_sgen, 's'))
+ params.sn_mva = item.GetAttribute(map_power_var(pf_variable_p_sgen, 's'))
params.scaling = item.scale0 if pf_variable_p_sgen == 'pgini' else 1
return params
@@ -1888,7 +1911,7 @@ def create_sgen_genstat(net, item, pv_as_slack, pf_variable_p_gen, dict_net, is_
return
params.update(ask(item, pf_variable_p_gen, 'p_mw', 'q_mvar', 'sn_mva'))
- logger.debug('genstat parameters: ' % params)
+ logger.debug(f'genstat parameters: {params}')
params.in_service = monopolar_in_service(item)
@@ -2036,7 +2059,7 @@ def get_pf_sgen_results(net, item, sg, is_unbalanced, element='sgen'):
res = np.nan
if item.HasResults(0):
if res_var_pf is not None:
- res = ga(item, res_var_pf) * get_power_multiplier(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf) * get_power_multiplier(item, res_var_pf)
else:
res = np.nan
net[sgen_type].at[sg, res_var_pp] = res
@@ -2074,8 +2097,8 @@ def create_sgen_neg_load(net, item, pf_variable_p_loads, dict_net):
if item.HasResults(0): # 'm' results...
logger.debug('<%s> has results' % params.name)
- net.res_sgen.at[sg, "pf_p"] = -ga(item, 'm:P:bus1')
- net.res_sgen.at[sg, "pf_q"] = -ga(item, 'm:Q:bus1')
+ net.res_sgen.at[sg, "pf_p"] = -item.GetAttribute('m:P:bus1')
+ net.res_sgen.at[sg, "pf_q"] = -item.GetAttribute('m:Q:bus1')
else:
net.res_sgen.at[sg, "pf_p"] = np.nan
net.res_sgen.at[sg, "pf_q"] = np.nan
@@ -2174,8 +2197,8 @@ def create_sgen_sym(net, item, pv_as_slack, pf_variable_p_gen, dict_net, export_
if item.HasResults(0): # 'm' results...
logger.debug('<%s> has results' % name)
- net['res_' + element].at[sid, "pf_p"] = ga(item, 'm:P:bus1') * multiplier
- net['res_' + element].at[sid, "pf_q"] = ga(item, 'm:Q:bus1') * multiplier
+ net['res_' + element].at[sid, "pf_p"] = item.GetAttribute('m:P:bus1') * multiplier
+ net['res_' + element].at[sid, "pf_q"] = item.GetAttribute('m:Q:bus1') * multiplier
else:
net['res_' + element].at[sid, "pf_p"] = np.nan
net['res_' + element].at[sid, "pf_q"] = np.nan
@@ -2189,10 +2212,10 @@ def create_sgen_asm(net, item, pf_variable_p_gen, dict_net):
dict_net['global_parameters']['global_generation_scaling']
multiplier = get_power_multiplier(item, pf_variable_p_gen)
- p_res = ga(item, 'pgini') * multiplier
- q_res = ga(item, 'qgini') * multiplier
+ p_res = item.GetAttribute('pgini') * multiplier
+ q_res = item.GetAttribute('qgini') * multiplier
if item.HasResults(0):
- q_res = ga(item, 'm:Q:bus1') / global_scaling * multiplier
+ q_res = item.GetAttribute('m:Q:bus1') / global_scaling * multiplier
else:
logger.warning('reactive power for asynchronous generator is not exported properly '
'(advanced modelling of asynchronous generators not implemented)')
@@ -2227,8 +2250,8 @@ def create_sgen_asm(net, item, pf_variable_p_gen, dict_net):
attr_list=["sernum", "chr_name", "cpSite.loc_name"])
if item.HasResults(0):
- net.res_sgen.at[sid, 'pf_p'] = ga(item, 'm:P:bus1') * multiplier
- net.res_sgen.at[sid, 'pf_q'] = ga(item, 'm:Q:bus1') * multiplier
+ net.res_sgen.at[sid, 'pf_p'] = item.GetAttribute('m:P:bus1') * multiplier
+ net.res_sgen.at[sid, 'pf_q'] = item.GetAttribute('m:Q:bus1') * multiplier
else:
net.res_sgen.at[sid, 'pf_p'] = np.nan
net.res_sgen.at[sid, 'pf_q'] = np.nan
@@ -2339,11 +2362,11 @@ def create_trafo(net, item, export_controller=True, tap_opt="nntap", is_unbalanc
tap_pos = np.nan
if pf_type.itapch:
if tap_opt == "nntap":
- tap_pos = ga(item, "nntap")
+ tap_pos = item.GetAttribute("nntap")
logger.debug("got tap %f from nntap" % tap_pos)
elif tap_opt == "c:nntap":
- tap_pos = ga(item, "c:nntap")
+ tap_pos = item.GetAttribute("c:nntap")
logger.debug("got tap %f from c:nntap" % tap_pos)
else:
raise ValueError('could not read current tap position: tap_opt = %s' % tap_opt)
@@ -2352,9 +2375,9 @@ def create_trafo(net, item, export_controller=True, tap_opt="nntap", is_unbalanc
# In PowerFactory, if the first tap changer is absent, the second is also, even if the check was there
if pf_type.itapch and pf_type.itapch2:
if tap_opt == "nntap":
- tap_pos2 = ga(item, "nntap2")
+ tap_pos2 = item.GetAttribute("nntap2")
elif tap_opt == "c:nntap":
- tap_pos2 = ga(item, "c:nntap2")
+ tap_pos2 = item.GetAttribute("c:nntap2")
if std_type is not None:
tid = pp.create_transformer(net, hv_bus=bus1, lv_bus=bus2, name=name,
@@ -2365,16 +2388,31 @@ def create_trafo(net, item, export_controller=True, tap_opt="nntap", is_unbalanc
logger.debug('created trafo at index <%d>' % tid)
else:
logger.info("Create Trafo 3ph")
- tid = pp.create_transformer_from_parameters(net, hv_bus=bus1, lv_bus=bus2, name=name,
- tap_pos=tap_pos,
- in_service=in_service, parallel=item.ntnum, df=item.ratfac,
- sn_mva=pf_type.strn, vn_hv_kv=pf_type.utrn_h, vn_lv_kv=pf_type.utrn_l,
- vk_percent=pf_type.uktr, vkr_percent=pf_type.uktrr,
- pfe_kw=pf_type.pfe, i0_percent=pf_type.curmg,
- vector_group=pf_type.vecgrp[:-1], vk0_percent=pf_type.uk0tr,
- vkr0_percent=pf_type.ur0tr, mag0_percent=pf_type.zx0hl_n,
- mag0_rx=pf_type.rtox0_n, si0_hv_partial=pf_type.zx0hl_h,
- shift_degree=pf_type.nt2ag * 30, tap2_pos=tap_pos2)
+ tid = pp.create_transformer_from_parameters(
+ net,
+ hv_bus=bus1,
+ lv_bus=bus2,
+ name=name,
+ tap_pos=tap_pos,
+ in_service=in_service,
+ parallel=item.ntnum,
+ df=item.ratfac,
+ sn_mva=pf_type.strn,
+ vn_hv_kv=pf_type.utrn_h,
+ vn_lv_kv=pf_type.utrn_l,
+ vk_percent=pf_type.uktr,
+ vkr_percent=pf_type.uktrr,
+ pfe_kw=pf_type.pfe,
+ i0_percent=pf_type.curmg,
+ vector_group=pf_type.vecgrp[:-1],
+ vk0_percent=pf_type.uk0tr,
+ vkr0_percent=pf_type.ur0tr,
+ mag0_percent=pf_type.zx0hl_n,
+ mag0_rx=pf_type.rtox0_n,
+ si0_hv_partial=pf_type.zx0hl_h,
+ shift_degree=pf_type.nt2ag * 30,
+ tap2_pos=tap_pos2
+ )
trafo_dict[item] = tid
# add value for voltage setpoint
@@ -2446,15 +2484,15 @@ def get_pf_trafo_results(net, item, tid, is_unbalanced):
if is_unbalanced:
trafo_type = "res_trafo_3ph"
result_variables = {
- "pf_i_a_hv_ka": "m:I:bushv:A",
- "pf_i_a_lv_ka": "m:I:buslv:A",
- "pf_i_b_hv_ka": "m:I:bushv:B",
- "pf_i_b_lv_ka": "m:I:buslv:B",
- "pf_i_c_hv_ka": "m:I:bushv:C",
- "pf_i_c_lv_ka": "m:I:buslv:C",
- "pf_i_n_hv_ka": "m:I0x3:bushv",
- "pf_i_n_lv_ka": "m:I0x3:buslv",
- "pf_loading_percent": "c:loading",
+ "pf_i_a_hv_ka": "m:I:bushv:A",
+ "pf_i_a_lv_ka": "m:I:buslv:A",
+ "pf_i_b_hv_ka": "m:I:bushv:B",
+ "pf_i_b_lv_ka": "m:I:buslv:B",
+ "pf_i_c_hv_ka": "m:I:bushv:C",
+ "pf_i_c_lv_ka": "m:I:buslv:C",
+ "pf_i_n_hv_ka": "m:I0x3:bushv",
+ "pf_i_n_lv_ka": "m:I0x3:buslv",
+ "pf_loading_percent": "c:loading",
}
else:
trafo_type = "res_trafo"
@@ -2465,7 +2503,7 @@ def get_pf_trafo_results(net, item, tid, is_unbalanced):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(-1): # -1 for 'c' results (whatever that is...)
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
net[trafo_type].at[tid, res_var_pp] = res
@@ -2534,21 +2572,21 @@ def create_trafo3w(net, item, tap_opt='nntap'):
ts = ["h", "m", "l"][side[0]]
# figure out current tap position
if tap_opt == "nntap":
- tap_pos = ga(item, 'n3tap_' + ts)
+ tap_pos = item.GetAttribute('n3tap_' + ts)
logger.debug("got tap %f from n3tap" % tap_pos)
elif tap_opt == "c:nntap":
- tap_pos = ga(item, "c:n3tap_" + ts)
+ tap_pos = item.GetAttribute("c:n3tap_" + ts)
logger.debug("got tap %f from c:n3tap" % tap_pos)
else:
raise ValueError('could not read current tap position: tap_opt = %s' % tap_opt)
params.update({
'tap_side': ts + 'v', # hv, mv, lv
- 'tap_step_percent': ga(item, 't:du3tp_' + ts),
- 'tap_step_degree': ga(item, 't:ph3tr_' + ts),
- 'tap_min': ga(item, 't:n3tmn_' + ts),
- 'tap_max': ga(item, 't:n3tmx_' + ts),
- 'tap_neutral': ga(item, 't:n3tp0_' + ts),
+ 'tap_step_percent': item.GetAttribute('t:du3tp_' + ts),
+ 'tap_step_degree': item.GetAttribute('t:ph3tr_' + ts),
+ 'tap_min': item.GetAttribute('t:n3tmn_' + ts),
+ 'tap_max': item.GetAttribute('t:n3tmx_' + ts),
+ 'tap_neutral': item.GetAttribute('t:n3tp0_' + ts),
'tap_pos': tap_pos
})
@@ -2562,7 +2600,7 @@ def create_trafo3w(net, item, tap_opt='nntap'):
logger.debug('successfully created trafo3w from parameters: %d' % tid)
# testen
- # net.trafo3w.loc[tid, 'tap_step_degree'] = ga(item, 't:ph3tr_h')
+ # net.trafo3w.loc[tid, 'tap_step_degree'] = item.GetAttribute('t:ph3tr_h')
# adding switches
# False if open, True if closed, None if no switch
@@ -2577,7 +2615,7 @@ def create_trafo3w(net, item, tap_opt='nntap'):
# assign loading from power factory results
if item.HasResults(-1): # -1 for 'c' results (whatever that is...)
logger.debug('trafo3w <%s> has results' % item.loc_name)
- loading = ga(item, 'c:loading')
+ loading = item.GetAttribute('c:loading')
net.res_trafo3w.at[tid, "pf_loading"] = loading
else:
net.res_trafo3w.at[tid, "pf_loading"] = np.nan
@@ -2587,12 +2625,12 @@ def create_trafo3w(net, item, tap_opt='nntap'):
if pf_type.itapzdep:
x_points = (net.trafo3w.at[tid, "tap_min"], net.trafo3w.at[tid, "tap_neutral"], net.trafo3w.at[tid, "tap_max"])
for side in ("hv", "mv", "lv"):
- vk_min = ga(pf_type, f"uktr3mn_{side[0]}")
+ vk_min = pf_type.GetAttribute(f"uktr3mn_{side[0]}")
vk_neutral = net.trafo3w.at[tid, f"vk_{side}_percent"]
- vk_max = ga(pf_type, f"uktr3mx_{side[0]}")
- vkr_min = ga(pf_type, f"uktrr3mn_{side[0]}")
+ vk_max = pf_type.GetAttribute(f"uktr3mx_{side[0]}")
+ vkr_min = pf_type.GetAttribute(f"uktrr3mn_{side[0]}")
vkr_neutral = net.trafo3w.at[tid, f"vkr_{side}_percent"]
- vkr_max = ga(pf_type, f"uktrr3mx_{side[0]}")
+ vkr_max = pf_type.GetAttribute(f"uktrr3mx_{side[0]}")
# todo zero-sequence parameters (must be implemented in build_branch first)
pp.control.create_trafo_characteristics(net, trafotable="trafo3w", trafo_index=tid,
variable=f"vk_{side}_percent", x_points=x_points,
@@ -2648,7 +2686,7 @@ def create_coup(net, item, is_fuse=False):
# # false approach, completely irrelevant
# def create_switch(net, item):
# switch_types = {"cbk": "CB", "sdc": "LBS", "swt": "LS", "dct": "DS"}
-# name = ga(item, 'loc_name')
+# name = item.GetAttribute('loc_name')
# logger.debug('>> creating switch <%s>' % name)
#
# pf_bus1 = item.GetNode(0)
@@ -2663,8 +2701,8 @@ def create_coup(net, item, is_fuse=False):
# bus2 = find_bus_index_in_net(pf_bus2, net)
# logger.debug('switch %s connects buses <%d> and <%d>' % (name, bus1, bus2))
#
-# switch_is_closed = bool(ga(item, 'on_off'))
-# switch_usage = switch_types[ga(item, 'aUsage')]
+# switch_is_closed = bool(item.GetAttribute('on_off'))
+# switch_usage = switch_types[item.GetAttribute('aUsage')]
#
# cd = pp.create_switch(net, name=name, bus=bus1, element=bus2, et='b',
# closed=switch_is_closed, type=switch_usage)
@@ -2679,6 +2717,12 @@ def create_shunt(net, item):
logger.error("Cannot add Shunt '%s': not connected" % item.loc_name)
return
+ def calc_p_mw_and_q_mvar(r: float, x: float) -> tuple[float, float]:
+ if r == 0 and x == 0:
+ return 0, 0
+ divisor: float = (r ** 2 + x ** 2)
+ return (item.ushnm ** 2 * r) / divisor * multiplier, (item.ushnm ** 2 * x) / divisor * multiplier
+
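A worked instance of this conversion with invented values (a purely capacitive shunt):

```python
r, x, ushnm, multiplier = 0.0, -100.0, 110.0, 1.0
divisor = r ** 2 + x ** 2
p_mw = (ushnm ** 2 * r) / divisor * multiplier    # 0.0
q_mvar = (ushnm ** 2 * x) / divisor * multiplier  # -121.0 Mvar (capacitive)
```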
multiplier = get_power_multiplier(item, 'Qact')
bus, _ = get_connection_nodes(net, item, 1)
params = {
@@ -2691,87 +2735,62 @@ def create_shunt(net, item):
'max_step': item.ncapx
}
print(item.loc_name)
+ r_val: float = 0.0
+ x_val: float = 0.0
if item.shtype == 0:
# Shunt is a R-L-C element
-
- R = item.rrea
- X = -1e6 / item.bcap + item.xrea
- if R == 0 and X == 0: #TODO put this into one function
- p_mw = 0
- params['q_mvar'] = 0
- else:
- p_mw = (item.ushnm ** 2 * R) / (R ** 2 + X ** 2) * multiplier
- params['q_mvar'] = (item.ushnm ** 2 * X) / (R ** 2 + X ** 2) * multiplier
- sid = pp.create_shunt(net, p_mw=p_mw, **params)
+ r_val = item.rrea
+ x_val = -1e6 / item.bcap + item.xrea
elif item.shtype == 1:
# Shunt is an R-L element
-
- R = item.rrea
- X = item.xrea
- if R == 0 and X == 0: #TODO put this into one function
- p_mw = 0
- params['q_mvar'] = 0
- else:
- p_mw = (item.ushnm ** 2 * R) / (R ** 2 + X ** 2) * multiplier
- params['q_mvar'] = (item.ushnm ** 2 * X) / (R ** 2 + X ** 2) * multiplier
- sid = pp.create_shunt(net, p_mw=p_mw, **params)
+ r_val = item.rrea
+ x_val = item.xrea
elif item.shtype == 2:
# Shunt is a capacitor bank
- B = item.bcap*1e-6
- G = item.gparac*1e-6
-
- R = G/(G**2 + B**2)
- X = -B/(G**2 + B**2)
- if R == 0 and X == 0: #TODO put this into one function
- p_mw = 0
- params['q_mvar'] = 0
- else:
- p_mw = (item.ushnm ** 2 * R) / (R ** 2 + X ** 2) * multiplier
- params['q_mvar'] = (item.ushnm ** 2 * X) / (R ** 2 + X ** 2) * multiplier
- sid = pp.create_shunt(net, p_mw=p_mw, **params)
+ b = item.bcap*1e-6
+ g = item.gparac*1e-6
+
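+        # series-equivalent impedance of the shunt admittance Y = G + jB:
+        # R = G / (G^2 + B^2), X = -B / (G^2 + B^2)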
+ r_val = g / (g ** 2 + b ** 2)
+ x_val = -b / (g ** 2 + b ** 2)
elif item.shtype == 3:
# Shunt is a R-L-C, Rp element
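+        # Z = Rp || (Rs + jXl) in series with the capacitor, whose impedance is 1/(jBc) = -j/Bc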
-        Rp = item.rpara
-        Rs = item.rrea
-        Xl = item.xrea
-        Bc = -item.bcap * 1e-6
-
-        R = Rp * (Rp * Rs + Rs ** 2 + Xl ** 2) / ((Rp + Rs) ** 2 + Xl ** 2)
-        X = 1 / Bc + (Xl * Rp ** 2) / ((Rp + Rs) ** 2 + Xl ** 2)
-        if R == 0 and X == 0: #TODO put this into one function
-            p_mw = 0
-            params['q_mvar'] = 0
-        else:
-            p_mw = (item.ushnm ** 2 * R) / (R ** 2 + X ** 2) * multiplier
-            params['q_mvar'] = (item.ushnm ** 2 * X) / (R ** 2 + X ** 2) * multiplier
-        sid = pp.create_shunt(net, p_mw=p_mw, **params)
+        rp = item.rpara
+        rs = item.rrea
+        xl = item.xrea
+        bc = -item.bcap * 1e-6
+
+        r_val = rp * (rp * rs + rs ** 2 + xl ** 2) / ((rp + rs) ** 2 + xl ** 2)
+        x_val = 1 / bc + (xl * rp ** 2) / ((rp + rs) ** 2 + xl ** 2)
elif item.shtype == 4:
# Shunt is a R-L-C1-C2, Rp element
-
- Rp = item.rpara
- Rs = item.rrea
- Xl = item.xrea
- B1 = 2 * np.pi * 50 * item.c1 * 1e-6
- B2 = 2 * np.pi * 50 * item.c2 * 1e-6
-
- Z = Rp * (Rs + 1j * (Xl - 1 / B1)) / (Rp + Rs + 1j * (Xl - 1 / B1)) - 1j / B2
- R = np.real(Z)
- X = np.imag(Z)
- if R == 0 and X == 0: #TODO put this into one function
- p_mw = 0
- params['q_mvar'] = 0
- else:
- p_mw = (item.ushnm ** 2 * R) / (R ** 2 + X ** 2) * multiplier
- params['q_mvar'] = (item.ushnm ** 2 * X) / (R ** 2 + X ** 2) * multiplier
+ rp = item.rpara
+ rs = item.rrea
+ xl = item.xrea
+ b1 = 2 * np.pi * 50 * item.c1 * 1e-6
+ b2 = 2 * np.pi * 50 * item.c2 * 1e-6
+
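+        # z = Rp || (Rs + j(Xl - 1/B1)) in series with C2 (impedance -j/B2); susceptances at 50 Hz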
+ z = rp * (rs + 1j * (xl - 1 / b1)) / (rp + rs + 1j * (xl - 1 / b1)) - 1j / b2
+ r_val = np.real(z)
+ x_val = np.imag(z)
+
+ if 0 <= item.shtype <= 4:
+ p_mw, params['q_mvar'] = calc_p_mw_and_q_mvar(r_val, x_val)
sid = pp.create_shunt(net, p_mw=p_mw, **params)
- add_additional_attributes(item, net, element='shunt', element_id=sid,
- attr_list=['cpSite.loc_name'], attr_dict={"cimRdfId": "origin_id"})
+ add_additional_attributes(
+ item,
+ net,
+ element='shunt',
+ element_id=sid,
+ attr_list=['cpSite.loc_name'],
+ attr_dict={"cimRdfId": "origin_id"}
+ )
+ else:
+        raise ValueError(f"Shunt type {item.shtype} is not valid: {item}")
if item.HasResults(0):
- net.res_shunt.loc[sid, 'pf_p'] = ga(item, 'm:P:bus1') * multiplier
- net.res_shunt.loc[sid, 'pf_q'] = ga(item, 'm:Q:bus1') * multiplier
+ net.res_shunt.loc[sid, 'pf_p'] = item.GetAttribute('m:P:bus1') * multiplier
+ net.res_shunt.loc[sid, 'pf_q'] = item.GetAttribute('m:Q:bus1') * multiplier
else:
net.res_shunt.loc[sid, 'pf_p'] = np.nan
net.res_shunt.loc[sid, 'pf_q'] = np.nan
@@ -2876,8 +2895,8 @@ def create_vac(net, item):
params['name'], item.itype))
if item.HasResults(0): # -1 for 'c' results (whatever that is...)
- net['res_%s' % elm].at[xid, "pf_p"] = -ga(item, 'm:P:bus1')
- net['res_%s' % elm].at[xid, "pf_q"] = -ga(item, 'm:Q:bus1')
+ net['res_%s' % elm].at[xid, "pf_p"] = -item.GetAttribute('m:P:bus1')
+ net['res_%s' % elm].at[xid, "pf_q"] = -item.GetAttribute('m:Q:bus1')
else:
net['res_%s' % elm].at[xid, "pf_p"] = np.nan
net['res_%s' % elm].at[xid, "pf_q"] = np.nan
@@ -2906,6 +2925,7 @@ def create_sind(net, item):
logger.debug('created series reactor %s as per unit impedance at index %d' %
(net.impedance.at[sind, 'name'], sind))
+
def create_scap(net, item):
# series capacitor is modelled as per-unit impedance, values in Ohm are calculated into values in
# per unit at creation
@@ -2915,11 +2935,11 @@ def create_scap(net, item):
logger.error("Cannot add Scap '%s': not connected" % item.loc_name)
return
- if (item.gcap==0) or (item.bcap==0):
+ if (item.gcap == 0) or (item.bcap == 0):
logger.info('not creating series capacitor for %s' % item.loc_name)
else:
- r_ohm = item.gcap/(item.gcap**2 + item.bcap**2)
- x_ohm = -item.bcap/(item.gcap**2 + item.bcap**2)
+ r_ohm = item.gcap / (item.gcap ** 2 + item.bcap ** 2)
+ x_ohm = -item.bcap / (item.gcap ** 2 + item.bcap ** 2)
scap = pp.create_series_reactor_as_impedance(net, from_bus=bus1, to_bus=bus2, r_ohm=r_ohm,
x_ohm=x_ohm, sn_mva=item.Sn,
name=item.loc_name,
@@ -2944,8 +2964,8 @@ def _get_vsc_control_modes(item, mono=True):
f" {item.loc_name} not implemented: {c_m}")
if item.HasResults(0):
- p_set_dc = -ga(item, f"m:P:{dc_bus_str}")
- q_set_ac = -ga(item, "m:Q:busac") * scaling
+ p_set_dc = -item.GetAttribute(f"m:P:{dc_bus_str}")
+ q_set_ac = -item.GetAttribute("m:Q:busac") * scaling
else:
p_set_dc = -item.psetp * scaling # does not work - in PowerFactory, the P set-point relates to AC side
q_set_ac = -item.qsetp * scaling
@@ -2965,7 +2985,6 @@ def _get_vsc_control_modes(item, mono=True):
def create_vscmono(net, item):
-
(bus, bus_dc), _ = get_connection_nodes(net, item, 2)
sn_mva = item.Snom
@@ -3000,7 +3019,9 @@ def create_vscmono(net, item):
}
if params["r_dc_ohm"] == 0:
- logger.warning(f"VSCmono element {params['name']} has no DC resistive loss factor - power flow will not converge!")
+ logger.warning(
+ f"VSCmono element {params['name']} has no DC resistive loss factor - power flow will not converge!"
+ )
vid = pp.create_vsc(net, **params)
logger.debug(f'created VSC {vid} for vscmono {item.loc_name}')
@@ -3012,7 +3033,7 @@ def create_vscmono(net, item):
for res_var_pp, res_var_pf in result_variables.items():
res = np.nan
if item.HasResults(0):
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
net.res_vsc.at[vid, res_var_pp] = -res
@@ -3060,22 +3081,20 @@ def create_vsc(net, item):
if item.HasResults(0):
for res_var_pp, res_var_pf in result_variables.items():
- res = ga(item, res_var_pf)
+ res = item.GetAttribute(res_var_pf)
net.res_vsc.at[vid_1, res_var_pp] = -res / 2
net.res_vsc.at[vid_2, res_var_pp] = -res / 2
- net.res_vsc.at[vid_1, "pf_p_dc_mw"] = -ga(item, "m:P:busdm")
- net.res_vsc.at[vid_2, "pf_p_dc_mw"] = -ga(item, "m:P:busdp")
+ net.res_vsc.at[vid_1, "pf_p_dc_mw"] = -item.GetAttribute("m:P:busdm")
+ net.res_vsc.at[vid_2, "pf_p_dc_mw"] = -item.GetAttribute("m:P:busdp")
else:
net.res_vsc.loc[vid_1, ["pf_p_mw", "pf_q_mvar", "pf_p_dc_mw"]] = np.nan
net.res_vsc.loc[vid_2, ["pf_p_mw", "pf_q_mvar", "pf_p_dc_mw"]] = np.nan
-
def create_stactrl(net, item):
stactrl_in_service = True
if item.outserv:
logger.info(f"Station controller {item.loc_name} is out of service")
- stactrl_in_service = False
return
machines = [m for m in item.psym if m is not None]
@@ -3231,15 +3250,15 @@ def create_stactrl(net, item):
if not has_path and not control_mode == 0 and not item.i_droop:
return
- if control_mode == 0: #### VOLTAGE CONTROL
+ if control_mode == 0: # VOLTAGE CONTROL
# controlled_node = item.rembar
controlled_node = item.cpCtrlNode
bus = bus_dict[controlled_node] # controlled node
- if item.uset_mode == 0: #### Station controller
+ if item.uset_mode == 0: # Station controller
v_setpoint_pu = item.usetp
else:
- v_setpoint_pu = controlled_node.vtarget #### Bus target voltage
+ v_setpoint_pu = controlled_node.vtarget # Bus target voltage
if item.i_droop: # Enable Droop
bsc = pp.control.BinarySearchControl(net, ctrl_in_service=stactrl_in_service,
@@ -3268,28 +3287,49 @@ def create_stactrl(net, item):
# q_control_mode = item.qu_char # 0: "Const Q", 1: "Q(V) Characteristic", 2: "Q(P) Characteristic"
# q_control_terminal = q_control_cubicle.cterm # terminal of the cubicle
if item.qu_char == 0:
- pp.control.BinarySearchControl(net, ctrl_in_service=stactrl_in_service,
- output_element=gen_element, output_variable="q_mvar",
- output_element_index=gen_element_index,
- output_element_in_service=gen_element_in_service,
- input_element=res_element_table,
- output_values_distribution=distribution, damping_factor=0.9,
- input_variable=variable, input_element_index=res_element_index,
- set_point=item.qsetp, voltage_ctrl=False, tol=1e-6)
+ pp.control.BinarySearchControl(
+ net, ctrl_in_service=stactrl_in_service,
+ output_element=gen_element,
+ output_variable="q_mvar",
+ output_element_index=gen_element_index,
+ output_element_in_service=gen_element_in_service,
+ input_element=res_element_table,
+ output_values_distribution=distribution,
+ damping_factor=0.9,
+ input_variable=variable,
+ input_element_index=res_element_index,
+ set_point=item.qsetp,
+ voltage_ctrl=False, tol=1e-6
+ )
elif item.qu_char == 1:
controlled_node = item.refbar
bus = bus_dict[controlled_node] # controlled node
- bsc = pp.control.BinarySearchControl(net, ctrl_in_service=stactrl_in_service,
- output_element=gen_element, output_variable="q_mvar",
- output_element_index=gen_element_index,
- output_element_in_service=gen_element_in_service,
- input_element=res_element_table,
- output_values_distribution=distribution, damping_factor=0.9,
- input_variable=variable, input_element_index=res_element_index,
- set_point=item.qsetp, voltage_ctrl=False, bus_idx=bus, tol=1e-6)
- pp.control.DroopControl(net, q_droop_mvar=item.Srated * 100 / item.ddroop, bus_idx=bus,
- vm_set_pu=item.udeadbup, vm_set_ub=item.udeadbup, vm_set_lb=item.udeadblow,
- controller_idx=bsc.index, voltage_ctrl=False)
+ bsc = pp.control.BinarySearchControl(
+ net, ctrl_in_service=stactrl_in_service,
+ output_element=gen_element,
+ output_variable="q_mvar",
+ output_element_index=gen_element_index,
+ output_element_in_service=gen_element_in_service,
+ input_element=res_element_table,
+ output_values_distribution=distribution,
+ damping_factor=0.9,
+ input_variable=variable,
+ input_element_index=res_element_index,
+ set_point=item.qsetp,
+ voltage_ctrl=False,
+ bus_idx=bus,
+ tol=1e-6
+ )
+ pp.control.DroopControl(
+ net,
+ q_droop_mvar=item.Srated * 100 / item.ddroop,
+ bus_idx=bus,
+ vm_set_pu=item.udeadbup,
+ vm_set_ub=item.udeadbup,
+ vm_set_lb=item.udeadblow,
+ controller_idx=bsc.index,
+ voltage_ctrl=False
+ )
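+            # note: ddroop appears to be given in percent, so Srated * 100 / ddroop
+            # yields the droop in Mvar per p.u. voltage deviation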
else:
raise NotImplementedError
else:
@@ -3316,28 +3356,34 @@ def split_line_at_length(net, line, length_pos):
std_type = net.line.at[line, 'std_type']
name = net.line.at[line, 'name']
- new_line = pp.create_line(net, from_bus=bus, to_bus=bus2, length_km=new_length,
- std_type=std_type, name=name, df=net.line.at[line, 'df'],
- parallel=net.line.at[line, 'parallel'],
- in_service=net.line.at[line, 'in_service'])
+ new_line = pp.create_line(
+ net,
+ from_bus=bus,
+ to_bus=bus2,
+ length_km=new_length,
+ std_type=std_type,
+ name=name,
+ df=net.line.at[line, 'df'],
+ parallel=net.line.at[line, 'parallel'],
+ in_service=net.line.at[line, 'in_service']
+ )
if 'max_loading_percent' in net.line.columns:
net.line.loc[new_line, 'max_loading_percent'] = net.line.at[line, 'max_loading_percent']
- if 'line_geodata' in net.keys() and line in net.line_geodata.index.values:
- coords = net.line_geodata.at[line, 'coords']
+        if isinstance(net.line.at[line, 'geo'], str):  # geo holds a GeoJSON string or NaN
+            # geojson.utils.coords returns a generator; materialize it since it is consumed several times below
+            coords = list(geojson.utils.coords(geojson.loads(net.line.at[line, 'geo'])))
scaling_factor = old_length / calc_len_coords(coords)
- sec_coords_a = get_section_coords(coords, sec_len=length_pos, start_len=0.,
- scale_factor=scaling_factor)
-
- sec_coords_b = get_section_coords(coords, sec_len=new_length, start_len=length_pos,
- scale_factor=scaling_factor)
+ sec_coords_a = get_section_coords(coords, sec_len=length_pos, start_len=0., scale_factor=scaling_factor)
+ sec_coords_b = get_section_coords(
+ coords, sec_len=new_length, start_len=length_pos, scale_factor=scaling_factor
+ )
- net.line_geodata.loc[line, 'coords'] = sec_coords_a
- net.line_geodata.loc[new_line, 'coords'] = sec_coords_b
+ net.line.loc[line, 'geo'] = geojson.dumps(geojson.LineString(sec_coords_a))
+ net.line.loc[new_line, 'geo'] = geojson.dumps(geojson.LineString(sec_coords_b))
- net.bus_geodata.loc[bus, ['x', 'y']] = sec_coords_b[0]
+        net.bus.at[bus, 'geo'] = geojson.dumps(geojson.Point(sec_coords_b[0]))
return bus
@@ -3346,16 +3392,14 @@ def get_lodlvp_length_pos(line_item, lod_item):
sections = line_item.GetContents('*.ElmLnesec')
if len(sections) > 0:
         sections.sort(key=lambda x: x.index)
- sections_start = [s.rellen for s in sections]
sections_end = [s.rellen + s.dline for s in sections]
else:
- sections_start = [0]
sections_end = [line_item.dline]
loads = line_item.GetContents('*.ElmLodlvp')
if len(loads) > 0:
         loads.sort(key=lambda x: x.rellen)
- loads_start = [l.rellen for l in loads]
+ loads_start = [load.rellen for load in loads]
else:
loads_start = [0]
@@ -3400,7 +3444,8 @@ def split_line(net, line_idx, pos_at_line, line_item):
return bus_j
elif (pos_at_line - line_length) > tol:
raise ValueError(
- 'Position at line is higher than the line length itself! Line length: %.7f, position at line: %.7f (line: \n%s)' % (
+            'Position at line is higher than the line length itself! '
+            'Line length: %.7f, position at line: %.7f (line: \n%s)' % (
# line_length, pos_at_line, line_item.loc_name))
line_length, pos_at_line, net.line.loc[line_dict[line_item]]))
else:
@@ -3438,7 +3483,7 @@ def split_line(net, line_idx, pos_at_line, line_item):
net.line.at[new_line, 'order'] = net.line.at[line_idx, 'order'] + 1
net.res_line.at[new_line, 'pf_loading'] = net.res_line.at[line_idx, 'pf_loading']
- if line_idx in net.line_geodata.index.values:
+        if isinstance(net.line.at[line_idx, 'geo'], str):  # only lines that carry GeoJSON data
logger.debug('setting new coords')
set_new_coords(net, new_bus, line_idx, new_line, line_length, pos_at_line)
@@ -3503,7 +3548,7 @@ def break_coords_sections(coords, section_length, scale_factor_length):
# set up new coordinates for line sections that are split by the new bus of the ElmLodlvp
def set_new_coords(net, bus_id, line_idx, new_line_idx, line_length, pos_at_line):
- line_coords = net.line_geodata.at[line_idx, 'coords']
+    # parse the stored GeoJSON string back into a coordinate list
+    line_coords = list(geojson.utils.coords(geojson.loads(net.line.at[line_idx, 'geo'])))
logger.debug('got coords for line %s' % line_idx)
scale_factor_length = get_scale_factor(line_length, line_coords)
@@ -3512,11 +3557,10 @@ def set_new_coords(net, bus_id, line_idx, new_line_idx, line_length, pos_at_line
logger.debug('calculated new coords: %s, %s ' % (section_coords, new_coords))
- net.line_geodata.at[line_idx, 'coords'] = section_coords
- net.line_geodata.at[new_line_idx, 'coords'] = new_coords
+ net.line.at[line_idx, 'geo'] = geojson.dumps(geojson.LineString(section_coords))
+ net.line.at[new_line_idx, 'geo'] = geojson.dumps(geojson.LineString(new_coords))
- net.bus_geodata.at[bus_id, 'x'] = new_coords[0][0]
- net.bus_geodata.at[bus_id, 'y'] = new_coords[0][1]
+ net.bus.at[bus_id, 'geo'] = geojson.dumps(geojson.Point(new_coords[0]))
# gather info about ElmLodlvp in a dict
@@ -3659,7 +3703,7 @@ def split_all_lines(net, lvp_dict):
# val = [(92, 1, 0.025, 0.1), (91, 2, 0.031, 0.2), (90, 2, 0.032, 0.3)]
for load_item, pos_at_line, (p, q) in val:
logger.debug(load_item)
- ## calculate at once and then read from dict - not good approach! don't do it
+ # calculate at once and then read from dict - not good approach! don't do it
# section, pos_at_sec = get_pos_at_sec(net, net_dgs, lvp_dict, line, load_idx)
# section = pas[load_idx]['section']
# pos_at_sec = pas[load_idx]['pos']
@@ -3681,7 +3725,7 @@ def split_all_lines(net, lvp_dict):
net.res_load.at[new_load, 'pf_p'] = p
net.res_load.at[new_load, 'pf_q'] = q
else:
- # const I not implemented for sgen...
+ # const I is not implemented for sgen
new_load = pp.create_sgen(net, new_bus, name=load_item.loc_name, p_mw=p, q_mvar=q)
logger.debug('created sgen %s' % new_load)
net.res_sgen.at[new_load, 'pf_p'] = p
diff --git a/pandapower/protection/utility_functions.py b/pandapower/protection/utility_functions.py
index 7ae26364e..b1f3ad034 100644
--- a/pandapower/protection/utility_functions.py
+++ b/pandapower/protection/utility_functions.py
@@ -1,8 +1,9 @@
 # This module provides various utility functions for general tasks such as plotting and grid search
import copy
-from typing import overload, List, Tuple
+from typing import List, Tuple
+from matplotlib.collections import PatchCollection
from typing_extensions import deprecated
import geojson
@@ -16,6 +17,7 @@
import pandapower as pp
import pandapower.plotting as plot
+from pandapower import pandapowerNet
from pandapower.topology.create_graph import create_nxgraph
import warnings
@@ -122,7 +124,8 @@ def create_sc_bus(net_copy, sc_line_id, sc_fraction):
x1, y1 = _get_coords_from_bus_idx(net, aux_line.from_bus)[0]
x2, y2 = _get_coords_from_bus_idx(net, aux_line.to_bus)[0]
- net.bus.geo.at[max_idx_bus + 1] = geojson.dumps(geojson.Point((sc_fraction * (x2 - x1) + x1, sc_fraction * (y2 - y1) + y1)), sort_keys=True)
+ net.bus.geo.at[max_idx_bus + 1] = geojson.dumps(
+ geojson.Point((sc_fraction * (x2 - x1) + x1, sc_fraction * (y2 - y1) + y1)), sort_keys=True)
return net
@@ -145,21 +148,9 @@ def calc_faults_at_full_line(net, line, location_step_size=0.01, start_location=
return fault_currents
-def get_line_idx(net, switch_id):
- # get the line id from swithc id
- line_idx = net.switch.element.at[switch_id]
- return line_idx
-
-
-def get_bus_idx(net, switch_id):
- # get the bus id using switch if
- bus_idx = net.switch.bus.at[switch_id]
- return bus_idx
-
-
def get_opposite_side_bus_from_switch(net, switch_id):
     # get the from and to bus of the switch
- line_idx = get_line_idx(net, switch_id)
+ line_idx = net.switch.element.at[switch_id]
is_from_bus = get_from_bus_info_switch(net, switch_id)
if is_from_bus:
@@ -184,33 +175,15 @@ def get_opposite_side_bus_from_bus_line(net, bus_idx, line_idx):
def get_from_bus_info_switch(net, switch_id):
     # check whether the given switch sits at the from_bus of its line
- bus_idx = get_bus_idx(net, switch_id)
- line_idx = get_line_idx(net, switch_id)
-
- for line in net.line.index: # can be written better
- if line == line_idx:
- if bus_idx == net.line.from_bus.at[line_idx]: # asks if switch is at from_bus
- is_from_bus = True
- # sc_fraction = 0.95
- else: # else it is at to_bus
- is_from_bus = False
- # sc_fraction = 0.05
+ bus_idx = net.switch.bus.at[switch_id]
+ line_idx = net.switch.element.at[switch_id]
- return is_from_bus
+ return bus_idx == net.line.from_bus.at[line_idx]
def get_from_bus_info_bus_line(net, bus_idx, line_idx):
     # check whether the given bus is the from_bus of the given line
- for line in net.line.index: # can be written better
- if line == line_idx:
- if bus_idx == net.line.from_bus.at[line_idx]: # asks if switch is at from_bus
- is_from_bus = True
- # sc_fraction = 0.95
- else: # else it is at to_bus
- is_from_bus = False
- # sc_fraction = 0.05
-
- return is_from_bus
+ return bus_idx == net.line.from_bus.at[line_idx]
def get_line_impedance(net, line_idx):
@@ -218,23 +191,19 @@ def get_line_impedance(net, line_idx):
line_length = net.line.length_km.at[line_idx]
line_r_per_km = net.line.r_ohm_per_km.at[line_idx]
line_x_per_km = net.line.x_ohm_per_km.at[line_idx]
- Z_line = complex(line_r_per_km * line_length, line_x_per_km * line_length) # Z = R + jX
- return Z_line
+ z_line = complex(line_r_per_km * line_length, line_x_per_km * line_length) # Z = R + jX
+ return z_line
-def get_lowest_impedance_line(net, lines):
+def get_lowest_impedance_line(net: pandapowerNet, lines):
     # get the line with the lowest impedance
- i = 0
+ min_imp_line = None
+ min_impedance = float('inf')
for line in lines:
impedance = abs(get_line_impedance(net, line))
- if i == 0:
- min_imp_line = line
+ if impedance < min_impedance:
min_impedance = impedance
- else:
- if impedance < min_impedance:
- min_impedance = impedance
- min_imp_line = line
- i += 1
+ min_imp_line = line
return min_imp_line
@@ -260,6 +229,42 @@ def fuse_bus_switches(net, bus_switches):
return net
+def get_fault_annotation(net: pandapowerNet, fault_current: float = 0.0, font_size_bus: float = 0.06) -> PatchCollection:
+ max_bus_idx = max(net.bus.dropna(subset=['geo']).index)
+ fault_text = f'\tI_sc = {fault_current}kA'
+
+ fault_geo_x_y: Tuple[float, float] = next(geojson.utils.coords(geojson.loads(net.bus.geo.at[max_bus_idx])))
+ fault_geo_x_y = (fault_geo_x_y[0], fault_geo_x_y[1] - font_size_bus + 0.02)
+
+ # list of new geo data for line (half position of switch)
+ fault_annotate: PatchCollection = plot.create_annotation_collection(
+ texts=[fault_text],
+ coords=[fault_geo_x_y],
+ size=font_size_bus,
+ prop=None
+ )
+
+ return fault_annotate
+
+
+def get_sc_location_annotation(net: pandapowerNet, sc_location: float, font_size_bus: float = 0.06) -> PatchCollection:
+ max_bus_idx = max(net.bus.dropna(subset=['geo']).index)
+ sc_text = f'\tsc_location: {sc_location * 100}%'
+
+ # list of new geo data for line (middle of position of switch)
+ sc_geo_x_y = next(geojson.utils.coords(geojson.loads(net.bus.geo.at[max_bus_idx])))
+ sc_geo_x_y = (sc_geo_x_y[0], sc_geo_x_y[1] + 0.02)
+
+ sc_annotate: PatchCollection = plot.create_annotation_collection(
+ texts=[sc_text],
+ coords=[sc_geo_x_y],
+ size=font_size_bus,
+ prop=None
+ )
+
+ return sc_annotate
+
+
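+# the two annotation helpers above are reused by the plotting functions below, e.g.
+# collection.append(get_fault_annotation(net, fault_current))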
def plot_tripped_grid(net, trip_decisions, sc_location, bus_size=0.055, plot_annotations=True):
# plot the tripped grid of net_sc
if MPLCURSORS_INSTALLED:
@@ -348,6 +353,8 @@ def plot_tripped_grid(net, trip_decisions, sc_location, bus_size=0.055, plot_ann
line_text = []
line_geodata = []
+    fault_current: float = 0.0
+
# for Switches in trip_decisions:
for line in net.line.index:
@@ -362,20 +369,22 @@ def plot_tripped_grid(net, trip_decisions, sc_location, bus_size=0.055, plot_ann
respect_in_service=False)
bus_list = list(get_bus_index)
+        bus_coords: List[Tuple[float, float]] = [
+            next(geojson.utils.coords(geojson.loads(net.bus.geo.at[bus]))) for bus in bus_list
+        ]
# TODO:
# place annotations on middle of the line
- line_geo_x = (net.bus_geodata.iloc[bus_list[0]].x + net.bus_geodata.iloc[bus_list[1]].x) / 2
-
- line_geo_y = ((net.bus_geodata.iloc[bus_list[0]].y + net.bus_geodata.iloc[bus_list[1]].y) / 2) + 0.05
+ line_geo_x = (bus_coords[0][0] + bus_coords[1][0]) / 2
+ line_geo_y = ((bus_coords[0][1] + bus_coords[1][1]) / 2) + 0.05
line_geo_x_y = [line_geo_x, line_geo_y]
# list of new geo data for line (half position of switch)
line_geodata.append(tuple(line_geo_x_y))
- fault_current = round(net.res_bus_sc['ikss_ka'].at[max(net.bus.index)],
- 2) # round(Switches['Fault Current'],2)
+ fault_current = round(net.res_bus_sc['ikss_ka'].at[max(net.bus.index)], 2)
+ # round(Switches['Fault Current'],2)
line_text.append(text_line)
@@ -385,67 +394,27 @@ def plot_tripped_grid(net, trip_decisions, sc_location, bus_size=0.055, plot_ann
# Bus Annotatations
bus_text = []
- for i in net.bus_geodata.index:
+ for i in net.bus.geo.dropna().index:
bus_texts = 'bus_' + str(i)
bus_text.append(bus_texts)
bus_text = bus_text[:-1]
- bus_geodata = net.bus_geodata[['x', 'y']]
+ bus_geodata = net.bus.geo.dropna().apply(geojson.loads).apply(geojson.utils.coords).apply(next).to_list()
# placing bus
- bus_geodata['x'] = bus_geodata['x'] - 0.11
- bus_geodata['y'] = bus_geodata['y'] + 0.095
+ bus_index = [(x[0] - 0.11, x[1] + 0.095) for x in bus_geodata]
# TODO:
- bus_index = [tuple(x) for x in bus_geodata.to_numpy()]
bus_annotate = plot.create_annotation_collection(texts=bus_text, coords=bus_index, size=0.06, prop=None)
collection.append(bus_annotate)
# Short circuit annotations
- fault_geodata = []
-
- fault_text = []
-
- fault_texts = ' I_sc = ' + str(fault_current) + 'kA'
-
- font_size_bus = 0.06 # font size of fault location text
-
- fault_geo_x = net.bus_geodata.iloc[max(net.bus_geodata.index)][0]
- fault_geo_y = net.bus_geodata.iloc[max(net.bus_geodata.index)][1] - font_size_bus + 0.02
-
- fault_geo_x_y = [fault_geo_x, fault_geo_y]
-
- # list of new geo data for line (half position of switch)
- fault_geodata.append(tuple(fault_geo_x_y))
-
- fault_text.append(fault_texts)
- fault_annotate = plot.create_annotation_collection(texts=fault_text, coords=fault_geodata, size=0.06, prop=None)
-
- collection.append(fault_annotate)
+ collection.append(get_fault_annotation(net, fault_current))
# sc_location annotation
- sc_text = []
- sc_geodata = []
-
- sc_texts = ' sc_location: ' + str(sc_location * 100) + '%'
-
- # font_size_bus=0.06 # font size of sc location
-
- sc_geo_x = net.bus_geodata.iloc[max(net.bus_geodata.index)][0]
-
- sc_geo_y = net.bus_geodata.iloc[max(net.bus_geodata.index)][1] + 0.02
-
- sc_geo_x_y = [sc_geo_x, sc_geo_y]
-
- # list of new geo data for line (middle of position of switch)
- sc_geodata.append(tuple(sc_geo_x_y))
-
- sc_text.append(sc_texts)
- sc_annotate = plot.create_annotation_collection(texts=sc_text, coords=sc_geodata, size=0.06, prop=None)
-
- collection.append(sc_annotate)
+ collection.append(get_sc_location_annotation(net, sc_location))
# switch annotations
# from pandapower.protection.implemeutility_functions import switch_geodata
@@ -595,7 +564,9 @@ def plot_tripped_grid_protection_device(net, trip_decisions, sc_location, sc_bus
bus_list = list(get_bus_index)
# place annotations on middle of the line
- bus_coords = list(zip(*net.bus.geo.iloc[bus_list[0:2]].apply(geojson.loads).apply(geojson.utils.coords).apply(next).to_list()))
+ bus_coords = list(
+ zip(*net.bus.geo.iloc[bus_list[0:2]].apply(geojson.loads).apply(geojson.utils.coords).apply(
+ next).to_list()))
line_geo_x_y = [sum(x) / 2 for x in bus_coords]
line_geo_x_y[1] += 0.05
@@ -610,7 +581,7 @@ def plot_tripped_grid_protection_device(net, trip_decisions, sc_location, sc_bus
line_annotate = plot.create_annotation_collection(texts=line_text, coords=line_geodata, size=0.06, prop=None)
collection.append(line_annotate)
- # Bus Annotatations
+ # Bus Annotations
bus_text = []
for i in net.bus.index:
bus_texts = f'bus_{i}'
@@ -626,42 +597,13 @@ def plot_tripped_grid_protection_device(net, trip_decisions, sc_location, sc_bus
bus_annotate = plot.create_annotation_collection(texts=bus_text, coords=bus_geodata, size=0.06, prop=None)
collection.append(bus_annotate)
- font_size_bus = 0.06 # font size of fault location text
max_bus_idx = max(net.bus.dropna(subset=['geo']).index)
# Short circuit annotations
- fault_geodata = []
- fault_text = []
- fault_texts = f'\tI_sc = {fault_current}kA'
-
- fault_geo_x_y = next(geojson.utils.coords(geojson.loads(net.bus.geo.at[max_bus_idx])))
- fault_geo_x_y = (fault_geo_x_y[0], fault_geo_x_y[1] - font_size_bus + 0.02)
-
- # list of new geo data for line (half position of switch)
- fault_geodata.append(fault_geo_x_y)
-
- fault_text.append(fault_texts)
- fault_annotate = plot.create_annotation_collection(texts=fault_text, coords=fault_geodata, size=0.06, prop=None)
-
- collection.append(fault_annotate)
+ collection.append(get_fault_annotation(net, fault_current))
# sc_location annotation
- sc_text = []
- sc_geodata = []
- sc_texts = f'\tsc_location: {sc_location * 100}%'
-
- # font_size_bus=0.06 # font size of sc location
-
- sc_geo_x_y = next(geojson.utils.coords(geojson.loads(net.bus.geo.at[max_bus_idx])))
- sc_geo_x_y = (sc_geo_x_y[0], sc_geo_x_y[1] + 0.02)
-
- # list of new geo data for line (middle of position of switch)
- sc_geodata.append(sc_geo_x_y)
-
- sc_text.append(sc_texts)
- sc_annotate = plot.create_annotation_collection(texts=sc_text, coords=sc_geodata, size=0.06, prop=None)
-
- collection.append(sc_annotate)
+ collection.append(get_sc_location_annotation(net, sc_location))
# switch annotations
# from pandapower.protection.utility_functions import switch_geodata
@@ -699,7 +641,7 @@ def get_connected_lines(net, bus_idx):
# first one. E.g. the from_bus given the to_bus of a line.
 @deprecated("Use pandapower.next_bus(net, bus, element_id) instead!")
def next_buses(net, bus, element_id):
- return pp.next_bus(net,bus,element_id)
+ return pp.next_bus(net, bus, element_id)
 # get the connected bus list from start to end bus
@@ -896,89 +838,59 @@ def bus_path_from_to_bus(net, radial_start_bus, loop_start_bus, end_bus):
return bus_path
-def get_switches_in_path(net, pathes):
+def get_switches_in_path(net, paths):
"""function calculate the switching times from the bus path"""
- Lines_in_path = []
+ lines_in_path: List[List] = []
- for path in pathes:
- Lines_at_path = []
+ for path in paths:
+ lines_at_path: set = set()
for bus in path:
- Lines_at_paths = []
- lines_at_bus = pp.get_connected_elements(net, "l", bus)
-
- for line in lines_at_bus:
- Lines_at_path.append(line)
-
- for Line1 in Lines_at_path:
- if net.line.from_bus[Line1] in path:
- if net.line.to_bus[Line1] in path:
- if Line1 not in Lines_at_paths:
- Lines_at_paths.append(Line1)
-
- Lines_in_path.append(Lines_at_paths)
+ lines_at_path.update(pp.get_connected_elements(net, "l", bus))
- switches_in_net = net.switch.index
- switches_in_path = []
+ lines_at_paths = [
+ line for line in lines_at_path
+ if net.line.from_bus[line] in path and net.line.to_bus[line] in path
+ ]
- for Linepath in Lines_in_path:
- switches_at_path = []
+ lines_in_path.append(lines_at_paths)
- for Line in Linepath:
-
- for switch in switches_in_net:
- if net.switch.et[switch] == "l":
- if net.switch.element[switch] == Line:
- switches_at_path.append(switch)
- switches_in_path.append(switches_at_path)
+    # keep a flat list of switch indices per path (matching the previous behaviour)
+    switches_in_path = [
+        [switch for line in line_path
+         for switch in net.switch[(net.switch['et'] == 'l') & (net.switch['element'] == line)].index]
+        for line_path in lines_in_path
+    ]
return switches_in_path
-def get_vi_angle(net, switch_id, powerflow_results=False):
+def get_vi_angle(net: pandapowerNet, switch_id: int, **kwargs) -> float:
"""calculate the angle between voltage and current with reference to voltage"""
- pp.runpp(net)
- line_idx = get_line_idx(net, switch_id)
- bus_idx = get_bus_idx(net, switch_id)
-
- if powerflow_results:
-
- if get_from_bus_info_switch(net, switch_id):
-
- P = net.res_line.p_from_mw.at[line_idx]
- Q = net.res_line.q_from_mvar.at[line_idx]
+ if "powerflow_results" in kwargs:
+ logger.warning(
+ "The powerflow_results argument is deprecated and will be removed in the future."
+ )
- vm = net.bus.vn_kv.at[bus_idx] * net.res_line.vm_from_pu.at[line_idx]
- else:
- P = net.res_line.p_to_mw.at[line_idx]
- Q = net.res_line.q_to_mvar.at[line_idx]
+ pp.runpp(net)
+ line_idx = net.switch.element.at[switch_id]
- vm = net.bus.vn_kv.at[bus_idx] * net.res_line.vm_to_pu.at[line_idx]
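+    # assumption: net.res_line_sc has been filled by an earlier short-circuit calculation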
+ if get_from_bus_info_switch(net, switch_id):
+ p = net.res_line_sc.p_from_mw.at[line_idx]
+ q = net.res_line_sc.q_from_mvar.at[line_idx]
else:
-
- if get_from_bus_info_switch(net, switch_id):
-
- P = net.res_line_sc.p_from_mw.at[line_idx]
- Q = net.res_line_sc.q_from_mvar.at[line_idx]
-
- vm = net.bus.vn_kv.at[bus_idx] * net.res_line_sc.vm_from_pu.at[line_idx]
-
- else:
- P = net.res_line_sc.p_to_mw.at[line_idx]
- Q = net.res_line_sc.q_to_mvar.at[line_idx]
- vm = net.bus.vn_kv.at[bus_idx] * net.res_line_sc.vm_to_pu.at[line_idx]
-
- if P > 0 and Q > 0:
- vi_angle = math.degrees(math.atan(Q / P))
- elif P < 0 and Q >= 0:
- vi_angle = math.degrees(math.atan(Q / P)) + 180
- elif P < 0 and Q < 0:
- vi_angle = math.degrees(math.atan(Q / P)) - 180
- elif P == 0 and Q > 0:
+ p = net.res_line_sc.p_to_mw.at[line_idx]
+ q = net.res_line_sc.q_to_mvar.at[line_idx]
+
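+    # quadrant handling equivalent to math.degrees(math.atan2(q, p)); the remaining
+    # combinations (p > 0 with q <= 0, or p == q == 0) fall through to math.inf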
+ if p > 0 and q > 0:
+ vi_angle = math.degrees(math.atan(q / p))
+ elif p < 0 <= q:
+ vi_angle = math.degrees(math.atan(q / p)) + 180
+ elif p < 0 and q < 0:
+ vi_angle = math.degrees(math.atan(q / p)) - 180
+ elif p == 0 < q:
vi_angle = 90
- elif P == 0 and Q < 0:
+ elif p == 0 > q:
vi_angle = -90
else:
vi_angle = math.inf
@@ -1007,8 +919,8 @@ def bus_path_multiple_ext_bus(net):
elif len(from_bus_path) != len(to_bus_path):
if len(from_bus_path) > 1 and len(to_bus_path) > 1:
- minlen = min(len(from_bus_path), len(to_bus_path))
- if from_bus_path[minlen - 1] != to_bus_path[minlen - 1]:
+ min_len = min(len(from_bus_path), len(to_bus_path))
+ if from_bus_path[min_len - 1] != to_bus_path[min_len - 1]:
if len(from_bus_path) < len(to_bus_path):
from_bus_path.append(to_bus_path[-1])
max_bus_path.append(from_bus_path)
@@ -1025,19 +937,19 @@ def bus_path_multiple_ext_bus(net):
return bus_path
- # get the line path from the given bus path
+# get the line path from the given bus path
def get_line_path(net, bus_path):
""" Function return the list of line path from the given bus path"""
- line_path=[]
- for i in range(len(bus_path)-1):
- bus1=bus_path[i]
- bus2=bus_path[i+1]
- line1=net.line[(net.line.from_bus==bus1) & (net.line.to_bus==bus2)].index.to_list()
- line2=net.line[(net.line.from_bus==bus2) & (net.line.to_bus==bus1)].index.to_list()
- if len(line2)==0:
+ line_path = []
+ for i in range(len(bus_path) - 1):
+ bus1 = bus_path[i]
+ bus2 = bus_path[i + 1]
+ line1 = net.line[(net.line.from_bus == bus1) & (net.line.to_bus == bus2)].index.to_list()
+ line2 = net.line[(net.line.from_bus == bus2) & (net.line.to_bus == bus1)].index.to_list()
+ if len(line2) == 0:
line_path.append(line1[0])
- if len(line1)==0:
+ if len(line1) == 0:
line_path.append(line2[0])
return line_path
diff --git a/pandapower/pypower/printpf.py b/pandapower/pypower/printpf.py
index d6a8e6573..8a0d9cec0 100644
--- a/pandapower/pypower/printpf.py
+++ b/pandapower/pypower/printpf.py
@@ -659,7 +659,7 @@ def printpf(baseMVA, bus=None, gen=None, branch=None, f=None, success=None,
fd.write('\n================================================================================')
fd.write('\n| Branch Flow Constraints |')
fd.write('\n================================================================================')
- fd.write('\nBrnch From "From" End Limit "To" End To')
+ fd.write('\nBranch From "From" End Limit "To" End To')
fd.write(strg)
fd.write('\n----- ----- ------- -------- -------- -------- ------- -----')
for i in range(nl):
diff --git a/pandapower/run.py b/pandapower/run.py
index f77e4f6ee..2ba35b72a 100644
--- a/pandapower/run.py
+++ b/pandapower/run.py
@@ -174,12 +174,12 @@ def runpp(net, algorithm='nr', calculate_voltage_angles=True, init="auto",
**KWARGS**:
- **lightsim2grid** ((bool,str), "auto") - whether to use the package lightsim2grid for power flow backend
+    **lightsim2grid** ((bool, str), "auto") - whether to use the package lightsim2grid for power
+ flow backend. For more details on compatibility, check out pandapower's documentation.
- **numba** (bool, True) - Activation of numba JIT compiler in the newton solver
-
- If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
- which leads to significant speed improvements.
+ **numba** (bool, True) - Activation of numba JIT compiler in the newton solver.
+ If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
+ which leads to significant speed improvements.
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If the impedance of switches
defined in net.switch.z_ohm is zero, buses connected by a closed bus-bus switch are fused to
diff --git a/pandapower/test/converter/jao_testfiles/testfile.xlsx b/pandapower/test/converter/jao_testfiles/testfile.xlsx
new file mode 100644
index 000000000..01dbecacb
Binary files /dev/null and b/pandapower/test/converter/jao_testfiles/testfile.xlsx differ
diff --git a/pandapower/test/converter/test_from_jao.py b/pandapower/test/converter/test_from_jao.py
new file mode 100644
index 000000000..100f0fdc4
--- /dev/null
+++ b/pandapower/test/converter/test_from_jao.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2024 by University of Kassel and Fraunhofer Institute for Energy Economics
+# and Energy System Technology (IEE), Kassel. All rights reserved.
+
+from copy import deepcopy
+import os
+import pytest
+import numpy as np
+import pandas as pd
+
+import pandapower as pp
+from pandapower.converter import from_jao
+
+
+def test_from_jao_with_testfile():
+ testfile = os.path.join(pp.pp_dir, 'test', 'converter', "jao_testfiles", "testfile.xlsx")
+ assert os.path.isfile(testfile)
+
+ # --- net1
+ net1 = from_jao(testfile, None, False)
+
+ assert len(net1.bus) == 10
+ assert len(net1.line) == 7
+ assert net1.line.Tieline.sum() == 2
+ assert len(net1.trafo) == 1
+
+ # line data conversion
+ assert np.all((0.01 < net1.line[['r_ohm_per_km', 'x_ohm_per_km']]) & (
+ net1.line[['r_ohm_per_km', 'x_ohm_per_km']] < 0.4))
+ assert np.all((0.5 < net1.line['c_nf_per_km']) & (net1.line['c_nf_per_km'] < 25))
+ assert np.all(net1.line['g_us_per_km'] < 1)
+ assert np.all((0.2 < net1.line['max_i_ka']) & (net1.line['max_i_ka'] < 5))
+
+ # trafo data conversion
+ assert 100 < net1.trafo.sn_mva.iat[0] < 1000
+ assert 6 < net1.trafo.vk_percent.iat[0] < 65
+ assert 0.25 < net1.trafo.vkr_percent.iat[0] < 1.2
+ assert 10 < net1.trafo.pfe_kw.iat[0] < 1000
+ assert net1.trafo.i0_percent.iat[0] < 0.1
+ assert np.isclose(net1.trafo.shift_degree.iat[0], 90)
+ assert np.isclose(net1.trafo.tap_step_degree.iat[0], 1.794)
+ assert net1.trafo.tap_min.iat[0] == -17
+ assert net1.trafo.tap_max.iat[0] == 17
+
+ # --- net2
+ net2 = from_jao(testfile, None, True)
+    assert pp.nets_equal(net1, net2)  # extend_data_for_grid_group_connections makes no difference here
+
+ # --- net3
+ net3 = from_jao(testfile, None, True, drop_grid_groups_islands=True)
+ assert len(net3.bus) == 6
+ assert len(net3.line) == 5
+ assert net3.line.Tieline.sum() == 1
+ assert len(net3.trafo) == 1
+
+
+if __name__ == '__main__':
+ test_from_jao_with_testfile()
+ # pytest.main([__file__, "-xs"])
\ No newline at end of file
diff --git a/pandapower/test/opf/test_pandamodels_runpm.py b/pandapower/test/opf/test_pandamodels_runpm.py
index 4c5e4b8f3..ff874a2ac 100644
--- a/pandapower/test/opf/test_pandamodels_runpm.py
+++ b/pandapower/test/opf/test_pandamodels_runpm.py
@@ -208,7 +208,7 @@ def test_compare_pwl_and_poly(net_3w_trafo_opf):
pp.create_poly_cost(net, 1, 'gen', cp1_eur_per_mw=2)
# pp.runopp(net)
- pp.runpm_ac_opf(net, correct_pm_network_data=False)
+ pp.runpm_ac_opf(net)
consistency_checks(net)
np.allclose(p_gen, net.res_gen.p_mw.values)
@@ -217,7 +217,7 @@ def test_compare_pwl_and_poly(net_3w_trafo_opf):
np.allclose(va_bus, net.res_bus.va_degree.values)
# pp.rundcopp(net)
- pp.runpm_dc_opf(net, correct_pm_network_data=False)
+ pp.runpm_dc_opf(net)
consistency_checks(net, test_q=False)
np.allclose(p_gen, net.res_gen.p_mw.values)
diff --git a/pandapower/timeseries/ts_runpp.py b/pandapower/timeseries/ts_runpp.py
index 431f02920..708eeb3b6 100644
--- a/pandapower/timeseries/ts_runpp.py
+++ b/pandapower/timeseries/ts_runpp.py
@@ -1,6 +1,4 @@
import inspect
-import collections
-import functools
import numpy as np
from numpy import complex128, zeros
@@ -12,20 +10,17 @@
from pandapower.control.controller.trafo_control import TrafoController
from pandapower.auxiliary import _clean_up
from pandapower.build_branch import _calc_trafo_parameter, _calc_trafo3w_parameter
-from pandapower.build_bus import _calc_pq_elements_and_add_on_ppc, \
- _calc_shunts_and_add_on_ppc
-from pandapower.pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A
+from pandapower.build_bus import _calc_pq_elements_and_add_on_ppc
from pandapower.pypower.idx_bus import PD, QD
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.idx_bus_dc import DC_PD
-from pandapower.pypower.makeSbus import _get_Sbus, _get_Cg, makeSbus
+from pandapower.pypower.makeSbus import makeSbus
from pandapower.pf.pfsoln_numba import pfsoln as pfsoln_full, pf_solution_single_slack
from pandapower.powerflow import LoadflowNotConverged, _add_auxiliary_elements
from pandapower.results import _copy_results_ppci_to_ppc, _extract_results, _get_aranged_lookup
from pandapower.results_branch import _get_branch_flows, _get_line_results, _get_trafo3w_results, _get_trafo_results
-from pandapower.results_bus import write_pq_results_to_element, _get_bus_v_results, _get_bus_results, _get_bus_dc_results
+from pandapower.results_bus import _get_bus_results, _get_bus_dc_results
from pandapower.results_gen import _get_gen_results
-from pandapower.timeseries.output_writer import OutputWriter
try:
import pandaplan.core.pplog as logging
diff --git a/pyproject.toml b/pyproject.toml
index 98b08cea7..f3906d484 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,7 +59,7 @@ Download = "https://pypi.org/project/pandapower/#files"
Changelog = "https://github.com/e2nIEE/pandapower/blob/develop/CHANGELOG.rst"
[project.optional-dependencies]
-docs = ["numpydoc", "matplotlib", "sphinx", "sphinx_rtd_theme", "sphinx-pyproject"]
+docs = ["numpydoc>=1.5.0", "matplotlib", "sphinx>=5.3.0", "sphinx_rtd_theme>=1.1.1", "sphinx-pyproject"]
plotting = ["plotly>=3.1.1", "matplotlib", "igraph", "geopandas>=1.0"]
test = ["pytest~=8.1", "pytest-xdist", "nbmake"]
performance = ["ortools", "numba>=0.25", "lightsim2grid==0.9.0"]
@@ -68,7 +68,7 @@ converter = ["matpowercaseframes"]
pgm = ["power-grid-model-io"]
control = ["shapely"]
all = [
- "numpydoc", "sphinx", "sphinx_rtd_theme", "sphinx-pyproject",
+ "numpydoc>=1.5.0", "sphinx>=5.3.0", "sphinx_rtd_theme>=1.1.1", "sphinx-pyproject",
"plotly>=3.1.1", "matplotlib", "igraph", "geopandas>=1.0",
"pytest~=8.1", "pytest-xdist", "nbmake",
"ortools", "numba>=0.25", "lightsim2grid==0.9.0",