diff --git a/README.rst b/README.rst
index 32acfb4d..df34c2b3 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,7 @@
+.. image:: https://readthedocs.org/projects/etrago/badge/?version=latest
+ :target: http://etrago.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation Status
+
 eTraGo
 ======
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..00550eab
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html       to make standalone HTML files"
+ @echo " dirhtml    to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle     to make pickle files"
+ @echo " json       to make JSON files"
+ @echo " htmlhelp   to make HTML files and a HTML help project"
+ @echo " qthelp     to make HTML files and a qthelp project"
+ @echo " applehelp  to make an Apple Help Book"
+ @echo " devhelp    to make HTML files and a Devhelp project"
+ @echo " epub       to make an epub"
+ @echo " latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf   to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text       to make text files"
+ @echo " man        to make manual pages"
+ @echo " texinfo    to make Texinfo files"
+ @echo " info       to make Texinfo files and run them through makeinfo"
+ @echo " gettext    to make PO message catalogs"
+ @echo " changes    to make an overview of all changed/added/deprecated items"
+ @echo " xml        to make Docutils-native XML files"
+ @echo " pseudoxml  to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck  to check all external links for integrity"
+ @echo " doctest    to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage   to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
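+
+# For a quick local build, e.g.:
+#
+#   make html
+#
+# and then open _build/html/index.html in a browser.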
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/etrago.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/etrago.qhc"
+
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/etrago"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/etrago"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
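+
+# Before publishing the docs it can be worth running, e.g.:
+#
+#   make linkcheck
+#
+# which writes a report on dead external links to _build/linkcheck/output.txt.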
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/doc/about.rst b/doc/about.rst
new file mode 100644
index 00000000..1d30052a
--- /dev/null
+++ b/doc/about.rst
@@ -0,0 +1,8 @@
+eTraGo
+======
+
+Optimization of flexibility options for transmission grids based on PyPSA
+
+A speciality in this context is that the transmission grid is described by the 380, 220 and 110 kV voltage levels in Germany. Conventionally, the 110 kV grid is part of the distribution grid. The integration of the transmission and the 'upper' distribution grid is a feature of eTraGo.
+
+The optimization focuses on flexibility options, with a special emphasis on energy storage. Grid expansion measures are not part of this tool; they are instead covered by 'eGo' https://github.com/openego/eGo
diff --git a/doc/api.rst b/doc/api.rst
new file mode 100644
index 00000000..ef08f3e3
--- /dev/null
+++ b/doc/api.rst
@@ -0,0 +1,7 @@
+.. make doc-string generated documentation appear here
+
+.. toctree::
+ :maxdepth: 7
+ :titlesonly:
+
+ eTraGo API
diff --git a/doc/api/etrago.cluster.rst b/doc/api/etrago.cluster.rst
new file mode 100644
index 00000000..892ff361
--- /dev/null
+++ b/doc/api/etrago.cluster.rst
@@ -0,0 +1,30 @@
+etrago\.cluster package
+=======================
+
+Submodules
+----------
+
+etrago\.cluster\.networkclustering module
+-----------------------------------------
+
+.. automodule:: etrago.cluster.networkclustering
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+etrago\.cluster\.snapshot module
+--------------------------------
+
+.. automodule:: etrago.cluster.snapshot
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: etrago.cluster
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/api/etrago.extras.rst b/doc/api/etrago.extras.rst
new file mode 100644
index 00000000..0b051970
--- /dev/null
+++ b/doc/api/etrago.extras.rst
@@ -0,0 +1,22 @@
+etrago\.extras package
+======================
+
+Submodules
+----------
+
+etrago\.extras\.utilities module
+--------------------------------
+
+.. automodule:: etrago.extras.utilities
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: etrago.extras
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/api/etrago.plots.rst b/doc/api/etrago.plots.rst
new file mode 100644
index 00000000..79fd9305
--- /dev/null
+++ b/doc/api/etrago.plots.rst
@@ -0,0 +1,22 @@
+etrago\.plots package
+=====================
+
+Submodules
+----------
+
+etrago\.plots\.snapshot\_clustering module
+------------------------------------------
+
+.. automodule:: etrago.plots.snapshot_clustering
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: etrago.plots
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/api/etrago.rst b/doc/api/etrago.rst
new file mode 100644
index 00000000..aad6caf3
--- /dev/null
+++ b/doc/api/etrago.rst
@@ -0,0 +1,39 @@
+etrago package
+==============
+
+Subpackages
+-----------
+
+.. toctree::
+
+ etrago.cluster
+ etrago.extras
+ etrago.plots
+
+Submodules
+----------
+
+etrago\.appl module
+-------------------
+
+.. automodule:: etrago.appl
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+etrago\.snaphot\_clustering\_app module
+---------------------------------------
+
+.. automodule:: etrago.snaphot_clustering_app
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: etrago
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/api/modules.rst b/doc/api/modules.rst
new file mode 100644
index 00000000..fda8d67d
--- /dev/null
+++ b/doc/api/modules.rst
@@ -0,0 +1,7 @@
+etrago
+======
+
+.. toctree::
+ :maxdepth: 4
+
+ etrago
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 00000000..a5907033
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,349 @@
+"""This file is part of eTraGo
+
+It is developed in the project open_eGo: https://openegoproject.wordpress.com
+
+eTraGo lives at github: https://github.com/openego/etrago/
+The documentation is available on RTD: https://etrago.readthedocs.io"""
+
+
+__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems"
+__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
+__author__ = "wolf_bunke"
+
+
+# -*- coding: utf-8 -*-
+#
+# eTraGo documentation build configuration file, created by
+# sphinx-quickstart on Fri Sep 29 10:55:47 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+from unittest.mock import MagicMock
+#from mock import Mock as MagicMock
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('../'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
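+# For eTraGo, the list below combines autodoc (automatic API extraction)
+# with napoleon/numpydoc so that NumPy-style docstrings render correctly.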
+extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.imgmath', + 'sphinx.ext.viewcode', +# 'sphinxcontrib.napoleon',#enable Napoleon interpreter of docstrings Sphinx v<=1.2 + 'sphinx.ext.napoleon', #enable Napoleon Sphinx v>1.3 +# 'sphinx_paramlinks',#to have links to the types of the parameters of the functions + 'numpydoc', +] + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = False +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = False +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = True +napoleon_use_param = True +napoleon_use_rtype = True +napoleon_use_keyword = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'eTraGo - Optimization of flexibility options for transmission grids based on PyPSA' +copyright = u'2015-2017, Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems' +author = u'ulfmueller, lukasol, wolfbunke, mariusves, s3pp' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.4' +# The full version, including alpha/beta/rc tags. +release = '0.4' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build', 'whatsnew'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + + +# Fix import error of modules which depend on C modules (mock out the imports for these modules) +# see http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules +if 'READTHEDOCS' in os.environ: + class Mock(MagicMock): + @classmethod + def __getattr__(cls, name): + return MagicMock() + + #MOCK_MODULES = ['libgeos', 'geos', 'libgeos_c', 'geos_c'] + # ToDo: Change to eTraGo + MOCK_MODULES = ['numpy', 'scipy', 'pandas.dataframe', 'pypsa'] + + sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# html_theme = 'alabaster' + +import sphinx_rtd_theme +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. 
+#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'etragodoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', + +# Latex figure (float) alignment +#'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'etrago.tex', u'eTraGo Documentation', + u'open_eGo-Team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'eTraGo', u'eTraGo Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'eTraGo', u'eTraGo Documentation', + author, 'eTraGo', 'electrical Transmission Grid Optimization of flexibility options for transmission grids based on PyPSA', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
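+# (Newer Sphinx releases prefer named entries, e.g.
+# {'python': ('https://docs.python.org/', None)}; the unnamed form
+# below works as well.)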
+intersphinx_mapping = {'https://docs.python.org/': None}
+
+# Numbered figures
+numfig = True
diff --git a/doc/getting_started.rst b/doc/getting_started.rst
new file mode 100644
index 00000000..737f5ad8
--- /dev/null
+++ b/doc/getting_started.rst
@@ -0,0 +1,24 @@
+Setup
+=========================
+
+
+Run:
+
+.. code-block:: bash
+
+   git clone https://github.com/openego/eTraGo
+
+Create a virtual environment (where you like it) and activate it:
+
+.. code-block:: bash
+
+   virtualenv -p python3 venv
+   source venv/bin/activate
+
+With your activated environment ``cd`` to the cloned directory and run:
+
+.. code-block:: bash
+
+   pip install -e eTraGo
+
+This will install all needed packages into your environment. Now you should be ready to go.
diff --git a/doc/images/etrago_logo.png b/doc/images/etrago_logo.png
new file mode 100644
index 00000000..c5fb21c0
Binary files /dev/null and b/doc/images/etrago_logo.png differ
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 00000000..356b36fa
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,47 @@
+.. eTraGo documentation master file, created by
+ sphinx-quickstart on Fri Sep 29 10:55:47 2017.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to eTraGo's documentation!
+==================================
+
+
+.. figure:: images/etrago_logo.png
+ :align: right
+ :scale: 80%
+
+Optimization of flexibility options for transmission grids based on PyPSA
+
+A speciality in this context is that the transmission grid is described by the 380, 220 and 110 kV voltage levels in Germany. Conventionally, the 110 kV grid is part of the distribution grid. The integration of the transmission and the 'upper' distribution grid is a feature of eTraGo.
+
+The optimization focuses on flexibility options, with a special emphasis on energy storage. Grid expansion measures are not part of this tool; they are instead covered by 'eGo' https://github.com/openego/eGo
+
+.. warning:: Note, eTraGo is still under construction
+
+
+
+
+.. toctree::
+ :maxdepth: 7
+ :titlesonly:
+
+ welcome
+ getting_started
+ usage_details
+ about
+
+ whatsnew
+
+
+ api
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 00000000..660e908c
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,263 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo.  html       to make standalone HTML files
+ echo.  dirhtml    to make HTML files named index.html in directories
+ echo.  singlehtml to make a single large HTML file
+ echo.  pickle     to make pickle files
+ echo.  json       to make JSON files
+ echo.  htmlhelp   to make HTML files and a HTML help project
+ echo.  qthelp     to make HTML files and a qthelp project
+ echo.  devhelp    to make HTML files and a Devhelp project
+ echo.  epub       to make an epub
+ echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo.  text       to make text files
+ echo.  man        to make manual pages
+ echo.  texinfo    to make Texinfo files
+ echo.  gettext    to make PO message catalogs
+ echo.  changes    to make an overview over all changed/added/deprecated items
+ echo.  xml        to make Docutils-native XML files
+ echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+ echo.  linkcheck  to check all external links for integrity
+ echo.  doctest    to run all doctests embedded in the documentation if enabled
+ echo.  coverage   to run coverage check of the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+
+REM Check if sphinx-build is available and fallback to Python version if any
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 goto sphinx_python
+goto sphinx_ok
+
+:sphinx_python
+
+set SPHINXBUILD=python -m sphinx.__init__
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+:sphinx_ok
+
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\etrago.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\etrago.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdf" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdfja" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf-ja
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+if "%1" == "coverage" (
+ %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of coverage in the sources finished, look at the ^
+results in %BUILDDIR%/coverage/python.txt.
+ goto end
+)
+
+if "%1" == "xml" (
+ %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The XML files are in %BUILDDIR%/xml.
+ goto end
+)
+
+if "%1" == "pseudoxml" (
+ %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+ goto end
+)
+
+:end
diff --git a/doc/usage_details.rst b/doc/usage_details.rst
new file mode 100644
index 00000000..e8574bb4
--- /dev/null
+++ b/doc/usage_details.rst
@@ -0,0 +1,42 @@
+.. _eTraGo-examples:
+
+How to use eTraGo?
+~~~~~~~~~~~~~~~~~~
+
+Examples
+========
+
+
+
+
+Usage
+=====
+
+eTraGo is controlled via the ``args`` dictionary set in ``appl.py``.
+
+
+**First**, adjust your settings and run the optimization:
+
+.. code-block:: python
+
+    # run eTraGo with the chosen settings
+    results = etrago(args)
+
+
+**Second**, create the plots:
+
+.. code-block:: python
+
+    # plot the results
+    results.plot()
+
+
+
+Explanation of key figures
+--------------------------
+
+========= ======================================= ====
+Parameter Description                             Unit
+========= ======================================= ====
+xxxx      yyy                                     MW
+========= ======================================= ====
diff --git a/doc/welcome.rst b/doc/welcome.rst
new file mode 100644
index 00000000..edf5307e
--- /dev/null
+++ b/doc/welcome.rst
@@ -0,0 +1,13 @@
+#####################
+What is eTraGo about?
+#####################
+
+**WARNING**: eTraGo is still under construction.
+
+
+
+This software project is part of the research project
+`open_eGo <https://openegoproject.wordpress.com>`_.
+
+
+
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
new file mode 100644
index 00000000..f1442a8c
--- /dev/null
+++ b/doc/whatsnew.rst
@@ -0,0 +1,12 @@
+What's New
+~~~~~~~~~~
+
+See what's new as per release!
+
+.. contents:: `Releases`
+ :depth: 1
+ :local:
+ :backlinks: top
+
+
+.. include:: whatsnew/v0-1-0.rst
diff --git a/doc/whatsnew/v0-1-0.rst b/doc/whatsnew/v0-1-0.rst
new file mode 100644
index 00000000..369a56c7
--- /dev/null
+++ b/doc/whatsnew/v0-1-0.rst
@@ -0,0 +1,6 @@
+Release v0.1.0 (XY XX, 2017)
+++++++++++++++++++++++++++++++
+
+As this is the first release of eTraGo, all features are new.
+
+
diff --git a/etrago/__pycache__/__init__.cpython-36.pyc b/etrago/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 00000000..ac47d23b
Binary files /dev/null and b/etrago/__pycache__/__init__.cpython-36.pyc differ
diff --git a/etrago/appl.py b/etrago/appl.py
index 9d92941f..0c2873e7 100644
--- a/etrago/appl.py
+++ b/etrago/appl.py
@@ -1,55 +1,192 @@
-"""This is the docstring for the example.py module. Modules names should
-have short, all-lowercase names. The module name may have underscores if
-this improves readability.
-Every module should have a docstring at the very top of the file. The
-module's docstring may extend over multiple lines. If your docstring does
-extend over multiple lines, the closing three quotation marks must be on
-a line by itself, preferably preceded by a blank line."""
-
-__copyright__ = "tba"
-__license__ = "tba"
-__author__ = "tba"
+"""
+This is the application file for the tool eTraGo.
+
+Define your connection parameters and power flow settings before executing the function etrago.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation; either version 3 of the
+License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +""" + +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "ulfmueller, lukasol, wolfbunke, mariusves, s3pp" import numpy as np from numpy import genfromtxt np.random.seed() -from egopowerflow.tools.tools import oedb_session -from egopowerflow.tools.io import NetworkScenario, results_to_oedb import time -from egopowerflow.tools.plot import (plot_line_loading, plot_stacked_gen, +from etrago.tools.io import NetworkScenario, results_to_oedb +from etrago.tools.plot import (plot_line_loading, plot_stacked_gen, add_coordinates, curtailment, gen_dist, storage_distribution) +from etrago.tools.utilities import oedb_session, load_shedding, data_manipulation_sh, results_to_csv, parallelisation, pf_post_lopf, loading_minimization, calc_line_losses, group_parallel_lines +from etrago.cluster.networkclustering import busmap_from_psql, cluster_on_extra_high_voltage, kmean_clustering -from etrago.extras.utilities import load_shedding, data_manipulation_sh, results_to_csv, parallelisation, pf_post_lopf, loading_minimization, calc_line_losses, kmean_clustering, group_parallel_lines - -from etrago.cluster.networkclustering import busmap_from_psql, cluster_on_extra_high_voltage -from pypsa.networkclustering import busmap_by_kmeans, get_clustering_from_busmap -import pandas as pd - -args = {'network_clustering':False, +args = {# Setup and Configuration: 'db': 'oedb', # db session - 'gridversion':'v0.2.11', #None for model_draft or Version number (e.g. v0.2.10) for grid schema + 'gridversion': 'v0.2.11', # None for model_draft or Version number (e.g. v0.2.11) for grid schema 'method': 'lopf', # lopf or pf - 'pf_post_lopf': False, #state whether you want to perform a pf after a lopf simulation - 'start_snapshot': 2320, - 'end_snapshot' : 2321, - 'scn_name': 'SH NEP 2035', - 'lpfile': False, # state if and where you want to save pyomo's lp file: False or '/path/tofolder' - 'results': False , # state if and where you want to save results as csv: False or '/path/tofolder' + 'pf_post_lopf': False, # state whether you want to perform a pf after a lopf simulation + 'start_snapshot': 1, + 'end_snapshot' : 24, + 'scn_name': 'Status Quo', # state which scenario you want to run: Status Quo, NEP 2035, eGo100 + 'solver': 'glpk', # glpk, cplex or gurobi + # Export options: + 'lpfile': False, # state if and where you want to save pyomo's lp file: False or /path/tofolder + 'results': False, # state if and where you want to save results as csv: False or /path/tofolder 'export': False, # state if you want to export the results back to the database - 'solver': 'gurobi', #glpk, cplex or gurobi - 'branch_capacity_factor': 1, #to globally extend or lower branch capacities - 'storage_extendable':True, - 'load_shedding':True, - 'generator_noise':True, + # Settings: + 'storage_extendable':True, # state if you want storages to be installed at each node if necessary. + 'generator_noise':True, # state if you want to apply a small generator noise + 'reproduce_noise': False, # state if you want to use a predefined set of random noise for the given scenario. if so, provide path, e.g. 
'noise_values.csv'
+ 'minimize_loading':False,
+ # Clustering:
+ 'k_mean_clustering': False, # state if you want to perform a k-means clustering on the given network. State False or the value k (e.g. 20).
+ 'network_clustering': True, # state if you want to perform a clustering of HV buses to EHV buses.
+ # Simplifications:
+ 'parallelisation':False, # state if you want to run snapshots in parallel.
+ 'line_grouping': True, # state if you want to group lines running between the same buses.
+ 'branch_capacity_factor': 1, # globally extend or lower branch capacities
+ 'load_shedding':True, # meet the demand at very high cost; for debugging purposes.
+ 'comments':None }

 def etrago(args):
+ """The etrago function works with the following arguments:
+
+
+ Parameters
+ ----------
+
+ db (str):
+ 'oedb',
+ Name of Database session setting stored in config.ini of oemof.db
+
+ gridversion (str):
+ 'v0.2.11',
+ Name of the data version number of oedb: state 'None' for
+ model_draft (sandbox) or an explicit version number
+ (e.g. 'v0.2.10') for the grid schema.
+
+ method (str):
+ 'lopf',
+ Choose between a non-linear power flow ('pf') or
+ a linear optimal power flow ('lopf').
+
+ pf_post_lopf (bool):
+ False,
+ Option to run a non-linear power flow (pf) directly after the
+ linear optimal power flow (and thus the dispatch) has finished.
+
+ start_snapshot (int):
+ 1,
+ Start hour of the scenario year to be calculated.
+
+ end_snapshot (int):
+ 2,
+ End hour of the scenario year to be calculated.
+
+ scn_name (str):
+ 'Status Quo',
+ Choose your scenario. Currently, there are three different
+ scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
+ want to use the full German dataset, you can use the excerpt of
+ Schleswig-Holstein by adding the acronym SH to the scenario
+ name (e.g. 'SH Status Quo').
+
+ solver (str):
+ 'glpk',
+ Choose your preferred solver. Current options: 'glpk' (open-source),
+ 'cplex' or 'gurobi'.
+
+ lpfile (obj):
+ False,
+ State if and where you want to save pyomo's lp file. Options:
+ False or '/path/tofolder'.
+
+ results (obj):
+ False,
+ State if and where you want to save results as csv files. Options:
+ False or '/path/tofolder'.
+
+ export (bool):
+ False,
+ State if you want to export the results of your calculation
+ back to the database.
+
+ storage_extendable (bool):
+ True,
+ Choose whether to allow the installation of extendable storages
+ (unlimited in size) at each grid node in order to meet the flexibility demand.
+
+ generator_noise (bool):
+ True,
+ Choose if you want to apply a small random noise to the marginal
+ costs of each generator in order to prevent a plateau of equally
+ optimal solutions.
+
+ reproduce_noise (obj):
+ False,
+ State if you want to use a predefined set of random noise for
+ the given scenario. If so, provide the path to the csv file,
+ e.g. 'noise_values.csv'.
+
+ minimize_loading (bool):
+ False,
+
+ k_mean_clustering (bool):
+ False,
+ State if you want to apply a clustering of all network buses down to
+ only 'k' buses. The weighting takes place considering generation and load
+ at each node.
+ If so, state the number k you want to apply. Otherwise put False.
+
+ network_clustering (bool):
+ False,
+ Choose if you want to cluster the full HV/EHV dataset down to only the EHV
+ buses. In that case, all HV buses are assigned to their closest EHV sub-station,
+ taking into account the shortest distance on power lines.
+
+ parallelisation (bool):
+ False,
+ Choose if you want to calculate a certain number of snapshots in parallel. If
+ yes, define the respective amount in the if-clause execution below. Otherwise
+ state False here.
+
+ line_grouping (bool):
+ True,
+ State if you want to group lines that connect the same two buses into one system.
+
+ branch_capacity_factor (numeric):
+ 1,
+ Add a factor here if you want to globally change line capacities (e.g. to "consider"
+ an (n-1) criterion or for debugging purposes).
+
+ load_shedding (bool):
+ False,
+ State here if you want to make use of the load shedding function which is helpful when
+ debugging: a very expensive generator is added at each bus and meets the demand when regular
+ generators cannot do so.
+
+ comments (str):
+ None
+
+ Returns
+ -------
+
+
+ """
+
 session = oedb_session(args['db'])

 # additional arguments cfgpath, version, prefix
@@ -70,14 +207,9 @@ def etrago(args):

 # add coordinates
 network = add_coordinates(network)
-
- # create generator noise
- noise_values = network.generators.marginal_cost + abs(np.random.normal(0,0.001,len(network.generators.marginal_cost)))
- np.savetxt("noise_values.csv", noise_values, delimiter=",")
- noise_values = genfromtxt('noise_values.csv', delimiter=',')
-
+
 # TEMPORARY vague adjustment due to transformer bug in data processing
- #network.transformers.x=network.transformers.x*0.01
+ network.transformers.x=network.transformers.x*0.0001

 if args['branch_capacity_factor']:
@@ -85,13 +217,19 @@
 network.transformers.s_nom = network.transformers.s_nom*args['branch_capacity_factor']

 if args['generator_noise']:
- # create generator noise
- noise_values = network.generators.marginal_cost + abs(np.random.normal(0,0.001,len(network.generators.marginal_cost)))
- np.savetxt("noise_values.csv", noise_values, delimiter=",")
- noise_values = genfromtxt('noise_values.csv', delimiter=',')
- # add random noise to all generator
- network.generators.marginal_cost = noise_values
-
+ # create or reproduce generator noise
+ if not args['reproduce_noise'] == False:
+ noise_values = genfromtxt('noise_values.csv', delimiter=',')
+ # add random noise to all generators
+ network.generators.marginal_cost = noise_values
+ else:
+ noise_values = network.generators.marginal_cost + abs(np.random.normal(0,0.001,len(network.generators.marginal_cost)))
+ np.savetxt("noise_values.csv", noise_values, delimiter=",")
+ noise_values = genfromtxt('noise_values.csv', delimiter=',')
+ # add random noise to all generators
+ network.generators.marginal_cost = noise_values
+
+
 if args['storage_extendable']:
 # set virtual storages to be extendable
 if network.storage_units.source.any()=='extendable_storage':
@@ -119,8 +257,8 @@
 network = cluster_on_extra_high_voltage(network, busmap, with_time=True)

 # k-mean clustering
- if args['k_mean_clustering']:
- network = kmean_clustering(network)
+ if not args['k_mean_clustering'] == False:
+ network = kmean_clustering(network, n_clusters=args['k_mean_clustering'])

 # Branch loading minimization
 if args['minimize_loading']:
@@ -174,10 +312,8 @@

 # make a line loading plot
 plot_line_loading(network)
-
 # plot stacked sum of nominal power for each generator type and timestep
 plot_stacked_gen(network, resolution="MW")
-
 # plot to show extendable storages
 storage_distribution(network)
diff --git a/etrago/cluster/networkclustering.py b/etrago/cluster/networkclustering.py
index 36f7fb5b..f4989000 100644
--- a/etrago/cluster/networkclustering.py
+++ b/etrago/cluster/networkclustering.py
@@ -1,5 +1,29 @@
-from etrago.extras.utilities import *
-from pypsa.networkclustering import aggregatebuses, aggregateoneport, aggregategenerators
+"""
+Networkclustering.py defines the methods to cluster power grid
+networks for application within the tool eTraGo.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation; either version 3 of the
+License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems"
+__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
+__author__ = "s3pp, wolfbunke, ulfmueller, lukasol"
+
+
+from etrago.tools.utilities import *
+from pypsa.networkclustering import aggregatebuses, aggregateoneport, aggregategenerators, get_clustering_from_busmap, busmap_by_kmeans
+from egoio.db_tables.model_draft import EgoGridPfHvBusmap
 from itertools import product
 import networkx as nx
@@ -247,3 +271,75 @@ def fetch():
 busmap = fetch()

 return busmap
+
+def kmean_clustering(network, n_clusters=10):
+    """ Implement k-means clustering on an existing network.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    n_clusters : int
+        Number of buses the network is reduced to.
+
+    Returns
+    -------
+    network : :class:`pypsa.Network`
+        The clustered network.
+    """
+    def weighting_for_scenario(x):
+        b_i = x.index
+        g = normed(gen.reindex(b_i, fill_value=0))
+        l = normed(load.reindex(b_i, fill_value=0))
+
+        w = g + l
+        return (w * (100. / w.max())).astype(int)
+
+    def normed(x):
+        return (x/x.sum()).fillna(0.)
+
+    print('start k-mean clustering')
+    # prepare k-mean
+    # k-means clustering (first try)
+    network.generators.control="PV"
+    network.buses['v_nom'] = 380.
+    # problem: our lines have no v_nom; it is implicitly defined by the connected buses:
+    network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)
+
+    # adjust the x of the lines which are not 380.
+    lines_v_nom_b = network.lines.v_nom != 380
+    network.lines.loc[lines_v_nom_b, 'x'] *= (380./network.lines.loc[lines_v_nom_b, 'v_nom'])**2
+    network.lines.loc[lines_v_nom_b, 'v_nom'] = 380.
+
+    trafo_index = network.transformers.index
+    transformer_voltages = pd.concat([network.transformers.bus0.map(network.buses.v_nom), network.transformers.bus1.map(network.buses.v_nom)], axis=1)
+
+
+    network.import_components_from_dataframe(
+        network.transformers.loc[:,['bus0','bus1','x','s_nom']]
+        .assign(x=network.transformers.x*(380./transformer_voltages.max(axis=1))**2)
+        .set_index('T' + trafo_index),
+        'Line')
+    network.transformers.drop(trafo_index, inplace=True)
+
+    for attr in network.transformers_t:
+        network.transformers_t[attr] = network.transformers_t[attr].reindex(columns=[])
+
+    #ToDo: change conv to types minus wind and solar
+    conv_types = {'biomass', 'run_of_river', 'gas', 'oil','coal', 'waste','uranium'}
+    # Attention: network.generators.carrier.unique()
+    # conv_types only for SH scenario defined!
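+    # The per-bus weighting for busmap_by_kmeans is derived from these
+    # conventional capacities: generator and storage p_nom are summed per
+    # bus, combined with the mean load, and normalised in
+    # weighting_for_scenario() above.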
+ gen = (network.generators.loc[network.generators.carrier.isin(conv_types) + ].groupby('bus').p_nom.sum().reindex(network.buses.index, + fill_value=0.) + network.storage_units.loc[network.storage_units.carrier.isin(conv_types) + ].groupby('bus').p_nom.sum().reindex(network.buses.index, fill_value=0.)) + + load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum() + + # k-mean clustering + # busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(np.repeat(1, + # len(network.buses)), index=network.buses.index) , n_clusters= 10) + weight = weighting_for_scenario(network.buses).reindex(network.buses.index, fill_value=1) + busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(weight), buses_i=network.buses.index , n_clusters=n_clusters) + + + # ToDo change function in order to use bus_strategies or similar + clustering = get_clustering_from_busmap(network, busmap) + network = clustering.network + #network = cluster_on_extra_high_voltage(network, busmap, with_time=True) + + return network diff --git a/etrago/extras/utilities.py b/etrago/extras/utilities.py deleted file mode 100644 index 2c37bd0f..00000000 --- a/etrago/extras/utilities.py +++ /dev/null @@ -1,403 +0,0 @@ -import pandas as pd -import numpy as np -import os -import time -from pyomo.environ import (Var,Constraint, PositiveReals,ConcreteModel) -from pypsa.networkclustering import busmap_by_kmeans, get_clustering_from_busmap - -def buses_of_vlvl(network, voltage_level): - """ Get bus-ids of given voltage level(s). - - Parameters - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - voltage_level: list - - Returns - ------- - list - List containing bus-ids. - """ - - mask = network.buses.v_nom.isin(voltage_level) - df = network.buses[mask] - - return df.index - - -def buses_grid_linked(network, voltage_level): - """ Get bus-ids of a given voltage level connected to the grid. - - Parameters - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - voltage_level: list - - Returns - ------- - list - List containing bus-ids. - """ - - mask = ((network.buses.index.isin(network.lines.bus0) | - (network.buses.index.isin(network.lines.bus1))) & - (network.buses.v_nom.isin(voltage_level))) - - df = network.buses[mask] - - return df.index - - -def connected_grid_lines(network, busids): - """ Get grid lines connected to given buses. - - Parameters - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - busids : list - List containing bus-ids. - - Returns - ------- - :class:`pandas.DataFrame - PyPSA lines. - """ - - mask = network.lines.bus1.isin(busids) |\ - network.lines.bus0.isin(busids) - - return network.lines[mask] - - -def connected_transformer(network, busids): - """ Get transformer connected to given buses. - - Parameters - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - busids : list - List containing bus-ids. - - Returns - ------- - :class:`pandas.DataFrame - PyPSA transformer. 
- """ - - mask = (network.transformers.bus0.isin(busids)) - - return network.transformers[mask] - - -def load_shedding (network, **kwargs): - """ Implement load shedding in existing network to identify feasibility problems - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - marginal_cost : int - Marginal costs for load shedding - p_nom : int - Installed capacity of load shedding generator - Returns - ------- - - """ - - marginal_cost_def = 10000#network.generators.marginal_cost.max()*2 - p_nom_def = network.loads_t.p_set.max().max() - - marginal_cost = kwargs.get('marginal_cost', marginal_cost_def) - p_nom = kwargs.get('p_nom', p_nom_def) - - network.add("Carrier", "load") - start = network.buses.index.astype(int).max() - nums = len(network.buses.index) - end = start+nums - index = list(range(start,end)) - index = [str(x) for x in index] - network.import_components_from_dataframe( - pd.DataFrame( - dict(marginal_cost=marginal_cost, - p_nom=p_nom, - carrier='load shedding', - bus=network.buses.index), - index=index), - "Generator" - ) - return - - -def data_manipulation_sh (network): - from shapely.geometry import Point, LineString, MultiLineString - from geoalchemy2.shape import from_shape, to_shape - - #add connection from Luebeck to Siems - - new_bus = str(int(network.buses.index.max())+1) - new_trafo = str(int(network.transformers.index.max())+1) - new_line = str(int(network.lines.index.max())+1) - network.add("Bus", new_bus,carrier='AC', v_nom=220, x=10.760835, y=53.909745) - network.add("Transformer", new_trafo, bus0="25536", bus1=new_bus, x=1.29960, tap_ratio=1, s_nom=1600) - network.add("Line",new_line, bus0="26387",bus1=new_bus, x=0.0001, s_nom=1600) - network.lines.loc[new_line,'cables']=3.0 - - #bus geom - point_bus1 = Point(10.760835,53.909745) - network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326)) - - #line geom/topo - network.lines.set_value(new_line, 'geom', from_shape(MultiLineString([LineString([to_shape(network.buses.geom['26387']),point_bus1])]),4326)) - network.lines.set_value(new_line, 'topo', from_shape(LineString([to_shape(network.buses.geom['26387']),point_bus1]),4326)) - - #trafo geom/topo - network.transformers.set_value(new_trafo, 'geom', from_shape(MultiLineString([LineString([to_shape(network.buses.geom['25536']),point_bus1])]),4326)) - network.transformers.set_value(new_trafo, 'topo', from_shape(LineString([to_shape(network.buses.geom['25536']),point_bus1]),4326)) - - return - -def results_to_csv(network, path): - """ - """ - if path==False: - return None - - if not os.path.exists(path): - os.makedirs(path, exist_ok=True) - - network.export_to_csv_folder(path) - data = pd.read_csv(os.path.join(path, 'network.csv')) - data['time'] = network.results['Solver'].Time - data.to_csv(os.path.join(path, 'network.csv')) - - if hasattr(network, 'Z'): - file = [i for i in os.listdir(path.strip('0123456789')) if i=='Z.csv'] - if file: - print('Z already calculated') - else: - network.Z.to_csv(path.strip('0123456789')+'/Z.csv', index=False) - - return - -def parallelisation(network, start_h, end_h, group_size, solver_name, extra_functionality=None): - - print("Performing linear OPF, {} snapshot(s) at a time:".format(group_size)) - x = time.time() - for i in range(int((end_h-start_h+1)/group_size)): - network.lopf(network.snapshots[group_size*i:group_size*i+group_size], solver_name=solver_name, extra_functionality=extra_functionality) - - - y = time.time() - z = (y - x) / 60 - return - -def pf_post_lopf(network, scenario): - - 
network_pf = network - - #For the PF, set the P to the optimised P - network_pf.generators_t.p_set = network_pf.generators_t.p_set.reindex(columns=network_pf.generators.index) - network_pf.generators_t.p_set = network_pf.generators_t.p - - #Calculate q set from p_set with given cosphi - #todo - - #Troubleshooting - #network_pf.generators_t.q_set = network_pf.generators_t.q_set*0 - #network.loads_t.q_set = network.loads_t.q_set*0 - #network.loads_t.p_set['28314'] = network.loads_t.p_set['28314']*0.5 - #network.loads_t.q_set['28314'] = network.loads_t.q_set['28314']*0.5 - #network.transformers.x=network.transformers.x['22596']*0.01 - #contingency_factor=2 - #network.lines.s_nom = contingency_factor*pups.lines.s_nom - #network.transformers.s_nom = network.transformers.s_nom*contingency_factor - - #execute non-linear pf - network_pf.pf(scenario.timeindex, use_seed=True) - - return network_pf - -def calc_line_losses(network): - """ Calculate losses per line with PF result data - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - s0 : series - apparent power of line - i0 : series - current of line - ------- - - """ - #### Line losses - # calculate apparent power S = sqrt(p² + q²) - s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2).\ - apply(np.sqrt)) - # calculate current I = S / U - i0_lines = s0_lines / network.lines.v_nom - # calculate losses per line and timestep network.lines_t.line_losses = I² * R - network.lines_t.losses = i0_lines**2 * network.lines.r - # calculate total losses per line - network.lines.losses = np.sum(network.lines_t.losses) - - #### Transformer losses - # calculate apparent power S = sqrt(p² + q²) - s0_trafo = ((network.transformers_t.p0**2 + network.transformers_t.q0**2).\ - apply(np.sqrt)) - # calculate losses per transformer and timestep - # network.transformers_t.losses = s0_trafo / network.transformers.s_nom ## !!! 
this needs to be finalised - # calculate fix no-load losses per transformer - network.transformers.losses_fix = 0.00275 * network.transformers.s_nom # average value according to http://ibn.ch/HomePageSchule/Schule/GIBZ/19_Transformatoren/19_Transformatoren_Loesung.pdf - # calculate total losses per line - network.transformers.losses = network.transformers.losses_fix # + np.sum(network.transformers_t.losses) - - # calculate total losses (possibly enhance with adding these values to network container) - losses_total = sum(network.lines.losses) + sum(network.transformers.losses) - print("Total lines losses for all snapshots [MW]:",round(losses_total,2)) - losses_costs = losses_total * np.average(network.buses_t.marginal_price) - print("Total costs for these losses [EUR]:",round(losses_costs,2)) - - return - -def loading_minimization(network,snapshots): - - network.model.number1 = Var(network.model.passive_branch_p_index, within = PositiveReals) - network.model.number2 = Var(network.model.passive_branch_p_index, within = PositiveReals) - - def cRule(model, c, l, t): - return (model.number1[c, l, t] - model.number2[c, l, t] == model.passive_branch_p[c, l, t]) - - network.model.cRule=Constraint(network.model.passive_branch_p_index, rule=cRule) - - network.model.objective.expr += 0.00001* sum(network.model.number1[i] + network.model.number2[i] for i in network.model.passive_branch_p_index) - -def kmean_clustering(network): - """ Implement k-mean clustering in existing network - ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - Returns - ------- - - """ - def weighting_for_scenario(x): - b_i = x.index - g = normed(gen.reindex(b_i, fill_value=0)) - l = normed(load.reindex(b_i, fill_value=0)) - - w= g + l - return (w * (100. / w.max())).astype(int) - - def normed(x): - return (x/x.sum()).fillna(0.) - - print('start k-mean clustering') - # prepare k-mean - # k-means clustering (first try) - network.generators.control="PV" - network.buses['v_nom'] = 380. - # problem our lines have no v_nom. this is implicitly defined by the connected buses: - network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) - - # adjust the x of the lines which are not 380. - lines_v_nom_b = network.lines.v_nom != 380 - network.lines.loc[lines_v_nom_b, 'x'] *= (380./network.lines.loc[lines_v_nom_b, 'v_nom'])**2 - network.lines.loc[lines_v_nom_b, 'v_nom'] = 380. - - trafo_index = network.transformers.index - transformer_voltages = pd.concat([network.transformers.bus0.map(network.buses.v_nom), network.transformers.bus1.map(network.buses.v_nom)], axis=1) - - - network.import_components_from_dataframe( - network.transformers.loc[:,['bus0','bus1','x','s_nom']] - .assign(x=network.transformers.x*(380./transformer_voltages.max(axis=1))**2) - .set_index('T' + trafo_index), - 'Line') - network.transformers.drop(trafo_index, inplace=True) - - for attr in network.transformers_t: - network.transformers_t[attr] = network.transformers_t[attr].reindex(columns=[]) - - #ToDo: change conv to types minus wind and solar - conv_types = {'biomass', 'run_of_river', 'gas', 'oil','coal', 'waste','uranium'} - # Attention: network.generators.carrier.unique() - # conv_types only for SH scenario defined! - gen = (network.generators.loc[network.generators.carrier.isin(conv_types) - ].groupby('bus').p_nom.sum().reindex(network.buses.index, - fill_value=0.) 
+ network.storage_units.loc[network.storage_units.carrier.isin(conv_types) - ].groupby('bus').p_nom.sum().reindex(network.buses.index, fill_value=0.)) - - load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum() - - # k-mean clustering - # busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(np.repeat(1, - # len(network.buses)), index=network.buses.index) , n_clusters= 10) - weight = weighting_for_scenario(network.buses).reindex(network.buses.index, fill_value=1) - busmap = busmap_by_kmeans(network, bus_weightings=pd.Series(weight), buses_i=network.buses.index , n_clusters= 10) - - - # ToDo change function in order to use bus_strategies or similar - clustering = get_clustering_from_busmap(network, busmap) - network = clustering.network - #network = cluster_on_extra_high_voltage(network, busmap, with_time=True) - - return network - -def group_parallel_lines(network): - - #ordering of buses: (not sure if still necessary, remaining from SQL code) - old_lines = network.lines - - for line in old_lines.index: - bus0_new = str(old_lines.loc[line,['bus0','bus1']].astype(int).min()) - bus1_new = str(old_lines.loc[line,['bus0','bus1']].astype(int).max()) - old_lines.set_value(line,'bus0',bus0_new) - old_lines.set_value(line,'bus1',bus1_new) - - # saving the old index - for line in old_lines: - old_lines['old_index'] = network.lines.index - - grouped = old_lines.groupby(['bus0','bus1']) - - #calculating electrical properties for parallel lines - grouped_agg = grouped.agg({ 'b': np.sum, - 'b_pu': np.sum, - 'cables': np.sum, - 'capital_cost': np.min, - 'frequency': np.mean, - 'g': np.sum, - 'g_pu': np.sum, - 'geom': lambda x: x[0], - 'length': lambda x: x.min(), - 'num_parallel': np.sum, - 'r': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'r_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 's_nom': np.sum, - 's_nom_extendable': lambda x: x.min(), - 's_nom_max': np.sum, - 's_nom_min': np.sum, - 's_nom_opt': np.sum, - 'scn_name': lambda x: x.min(), - 'sub_network': lambda x: x.min(), - 'terrain_factor': lambda x: x.min(), - 'topo': lambda x: x[0], - 'type': lambda x: x.min(), - 'v_ang_max': lambda x: x.min(), - 'v_ang_min': lambda x: x.min(), - 'x': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'x_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'old_index': np.min}) - - for i in range(0,len(grouped_agg.index)): - grouped_agg.set_value(grouped_agg.index[i],'bus0',grouped_agg.index[i][0]) - grouped_agg.set_value(grouped_agg.index[i],'bus1',grouped_agg.index[i][1]) - - new_lines=grouped_agg.set_index(grouped_agg.old_index) - new_lines=new_lines.drop('old_index',1) - network.lines = new_lines - - return \ No newline at end of file diff --git a/etrago/plots/__init__.py b/etrago/plots/__init__.py deleted file mode 100644 index 6e2d5ecc..00000000 --- a/etrago/plots/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -"""This is the docstring for the example.py module. Modules names should -have short, all-lowercase names. The module name may have underscores if -this improves readability. -Every module should have a docstring at the very top of the file. The -module's docstring may extend over multiple lines. 
If your docstring does -extend over multiple lines, the closing three quotation marks must be on -a line by itself, preferably preceded by a blank line.""" - -__copyright__ = "tba" -__license__ = "tba" -__author__ = "tba" - - diff --git a/etrago/extras/__init__.py b/etrago/tools/__init__.py similarity index 100% rename from etrago/extras/__init__.py rename to etrago/tools/__init__.py diff --git a/etrago/tools/config.json b/etrago/tools/config.json new file mode 100644 index 00000000..acbd982e --- /dev/null +++ b/etrago/tools/config.json @@ -0,0 +1,43 @@ +{ + "lopf": + { + "Bus": null, + "Generator": + { + "GeneratorPqSet": ["p_set", "p_max_pu"] + }, + "Line": null, + "Transformer": null, + "Load": + { + "LoadPqSet": ["p_set", "q_set"] + }, + "Storage": + { + "StoragePqSet": ["p_set"] + } + }, + "pf": + { + "Bus": + { + "BusVMagSet":["v_mag_pu_set"] + }, + "Generator": + { + "GeneratorPqSet": ["p_set", "q_set"] + }, + "Line": null, + "Transformer": null, + "Load": + { + "LoadPqSet": ["p_set", "q_set"] + }, + "Storage": + { + "StoragePqSet": ["p_set", "q_set"] + } + } +} + + diff --git a/etrago/tools/io.py b/etrago/tools/io.py new file mode 100644 index 00000000..198e00a2 --- /dev/null +++ b/etrago/tools/io.py @@ -0,0 +1,972 @@ +""" io.py + +Input/output operations between powerflow schema in the oedb and PyPSA. +Additionally oedb wrapper classes to instantiate PyPSA network objects. + + +Attributes +---------- + +packagename: str + Package containing orm class definitions +temp_ormclass: str + Orm class name of table with temporal resolution +carr_ormclass: str + Orm class name of table with carrier id to carrier name datasets + +""" + +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "ulfmueller, mariusves" + +import pypsa +from importlib import import_module +import pandas as pd +from sqlalchemy.orm.exc import NoResultFound +from sqlalchemy import and_, func +from collections import OrderedDict +import re +import json +import os + + +packagename = 'egoio.db_tables' +temp_ormclass = 'TempResolution' +carr_ormclass = 'Source' + +def loadcfg(path=''): + if path == '': + dirname = os.path.dirname(__file__) + path = os.path.join(dirname, 'config.json') + return json.load(open(path), object_pairs_hook=OrderedDict) + + +class ScenarioBase(): + """ Base class to hide package/db handling + """ + + def __init__(self, session, method, version=None, *args, **kwargs): + + global temp_ormclass + global carr_ormclass + + schema = 'model_draft' if version is None else 'grid' + + cfgpath = kwargs.get('cfgpath', '') + self.config = loadcfg(cfgpath)[method] + + self.session = session + self.version = version + self._prefix = kwargs.get('prefix', 'EgoGridPfHv') + self._pkg = import_module(packagename + '.' + schema) + self._mapped = {} + + # map static and timevarying classes + for k, v in self.config.items(): + self.map_ormclass(k) + if isinstance(v, dict): + for kk in v.keys(): + self.map_ormclass(kk) + + # map temporal resolution table + self.map_ormclass(temp_ormclass) + + # map carrier id to carrier table + self.map_ormclass(carr_ormclass) + + def map_ormclass(self, name): + + global packagename + + try: + self._mapped[name] = getattr(self._pkg, self._prefix + name) + + except AttributeError: + print('Warning: Relation %s does not exist.' 
% name) + + +class NetworkScenario(ScenarioBase): + """ + """ + + def __init__(self, session, *args, **kwargs): + super().__init__(session, *args, **kwargs) + + self.scn_name = kwargs.get('scn_name', 'Status Quo') + self.method = kwargs.get('method', 'lopf') + self.start_snapshot = kwargs.get('start_snapshot', 1) + self.end_snapshot = kwargs.get('end_snapshot', 20) + self.temp_id = kwargs.get('temp_id', 1) + self.network = None + + self.configure_timeindex() + + def __repr__(self): + r = ('NetworkScenario: %s' % self.scn_name) + + if not self.network: + r += "\nTo create a PyPSA network call .build_network()." + + return r + + def configure_timeindex(self): + """ + """ + + try: + + ormclass = self._mapped['TempResolution'] + tr = self.session.query(ormclass).filter( + ormclass.temp_id == self.temp_id).one() + + except (KeyError, NoResultFound): + print('temp_id %s does not exist.' % self.temp_id) + + timeindex = pd.DatetimeIndex(start=tr.start_time, + periods=tr.timesteps, + freq=tr.resolution) + + self.timeindex = timeindex[self.start_snapshot - 1: self.end_snapshot] + + def id_to_source(self): + + ormclass = self._mapped['Source'] + query = self.session.query(ormclass) + + # TODO column naming in database + return {k.source_id: k.name for k in query.all()} + + def by_scenario(self, name): + """ + """ + + ormclass = self._mapped[name] + query = self.session.query(ormclass).filter( + ormclass.scn_name == self.scn_name) + + if self.version: + query = query.filter(ormclass.version == self.version) + + # TODO: Better handled in db + if name == 'Transformer': + name = 'Trafo' + + df = pd.read_sql(query.statement, + self.session.bind, + index_col=name.lower() + '_id') + + if 'source' in df: + df.source = df.source.map(self.id_to_source()) + + return df + + def series_by_scenario(self, name, column): + """ + """ + + ormclass = self._mapped[name] + + # TODO: pls make more robust + id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id' + id_column = id_column.lower() + + query = self.session.query( + getattr(ormclass, id_column), + getattr(ormclass, column)[self.start_snapshot: self.end_snapshot]. + label(column)).filter(and_( + ormclass.scn_name == self.scn_name, + ormclass.temp_id == self.temp_id)) + + if self.version: + query = query.filter(ormclass.version == self.version) + + df = pd.io.sql.read_sql(query.statement, + self.session.bind, + columns=[column], + index_col=id_column) + + df.index = df.index.astype(str) + + # change of format to fit pypsa + df = df[column].apply(pd.Series).transpose() + + try: + assert not df.empty + df.index = self.timeindex + except AssertionError: + print("No data for %s in column %s." % (name, column)) + + return df + + def build_network(self, *args, **kwargs): + """ + """ + # TODO: build_network takes care of divergences in database design and + # future PyPSA changes from PyPSA's v0.6 on. This concept should be + # replaced, when the oedb has a revision system in place, because + # sometime this will break!!! 
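+        # Editor's note: a minimal usage sketch for this class (illustrative
+        # only, not part of the patch; oedb_session() from
+        # etrago.tools.utilities is one way to obtain the required session):
+        #
+        #   from etrago.tools.io import NetworkScenario
+        #   from etrago.tools.utilities import oedb_session
+        #
+        #   session = oedb_session()
+        #   scenario = NetworkScenario(session, method='lopf',
+        #                              scn_name='Status Quo',
+        #                              start_snapshot=1, end_snapshot=24)
+        #   network = scenario.build_network()  # pypsa.Network, snapshots set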
+ + network = pypsa.Network() + network.set_snapshots(self.timeindex) + + timevarying_override = False + + if pypsa.__version__ == '0.8.0': + + old_to_new_name = {'Generator': + {'p_min_pu_fixed': 'p_min_pu', + 'p_max_pu_fixed': 'p_max_pu', + 'source': 'carrier', + 'dispatch': 'former_dispatch'}, + 'Bus': + {'current_type': 'carrier'}, + 'Transformer': + {'trafo_id': 'transformer_id'}, + 'Storage': + {'p_min_pu_fixed': 'p_min_pu', + 'p_max_pu_fixed': 'p_max_pu', + 'soc_cyclic': 'cyclic_state_of_charge', + 'soc_initial': 'state_of_charge_initial'}} + + timevarying_override = True + + else: + + old_to_new_name = {'Storage': + {'soc_cyclic': 'cyclic_state_of_charge', + 'soc_initial': 'state_of_charge_initial'}} + + for comp, comp_t_dict in self.config.items(): + + # TODO: This is confusing, should be fixed in db + pypsa_comp_name = 'StorageUnit' if comp == 'Storage' else comp + + df = self.by_scenario(comp) + + if comp in old_to_new_name: + + tmp = old_to_new_name[comp] + df.rename(columns=tmp, inplace=True) + + network.import_components_from_dataframe(df, pypsa_comp_name) + + if comp_t_dict: + + for comp_t, columns in comp_t_dict.items(): + + for col in columns: + + df_series = self.series_by_scenario(comp_t, col) + + # TODO: VMagPuSet? + if timevarying_override and comp == 'Generator': + idx = df[df.former_dispatch == 'flexible'].index + idx = [i for i in idx if i in df_series.columns] + df_series.drop(idx, axis=1, inplace=True) + + try: + + pypsa.io.import_series_from_dataframe( + network, + df_series, + pypsa_comp_name, + col) + + except (ValueError, AttributeError): + print("Series %s of component %s could not be " + "imported" % (col, pypsa_comp_name)) + + self.network = network + + return network + +def clear_results_db(session): + from egoio.db_tables.model_draft import EgoGridPfHvResultBus as BusResult,\ + EgoGridPfHvResultBusT as BusTResult,\ + EgoGridPfHvResultStorage as StorageResult,\ + EgoGridPfHvResultStorageT as StorageTResult,\ + EgoGridPfHvResultGenerator as GeneratorResult,\ + EgoGridPfHvResultGeneratorT as GeneratorTResult,\ + EgoGridPfHvResultLine as LineResult,\ + EgoGridPfHvResultLineT as LineTResult,\ + EgoGridPfHvResultLoad as LoadResult,\ + EgoGridPfHvResultLoadT as LoadTResult,\ + EgoGridPfHvResultTransformer as TransformerResult,\ + EgoGridPfHvResultTransformerT as TransformerTResult,\ + EgoGridPfHvResultMeta as ResultMeta + session.query(BusResult).delete() + session.query(BusTResult).delete() + session.query(StorageResult).delete() + session.query(StorageTResult).delete() + session.query(GeneratorResult).delete() + session.query(GeneratorTResult).delete() + session.query(LoadResult).delete() + session.query(LoadTResult).delete() + session.query(LineResult).delete() + session.query(LineTResult).delete() + session.query(TransformerResult).delete() + session.query(TransformerTResult).delete() + session.query(ResultMeta).delete() + session.commit() + + +def results_to_oedb(session, network, grid, args): + """Return results obtained from PyPSA to oedb""" + # moved this here to prevent error when not using the mv-schema + import datetime + if grid.lower() == 'mv': + print('MV currently not implemented') + elif grid.lower() == 'hv': + from egoio.db_tables.model_draft import EgoGridPfHvResultBus as BusResult,\ + EgoGridPfHvResultBusT as BusTResult,\ + EgoGridPfHvResultStorage as StorageResult,\ + EgoGridPfHvResultStorageT as StorageTResult,\ + EgoGridPfHvResultGenerator as GeneratorResult,\ + EgoGridPfHvResultGeneratorT as GeneratorTResult,\ + EgoGridPfHvResultLine as 
LineResult,\ + EgoGridPfHvResultLineT as LineTResult,\ + EgoGridPfHvResultLoad as LoadResult,\ + EgoGridPfHvResultLoadT as LoadTResult,\ + EgoGridPfHvResultTransformer as TransformerResult,\ + EgoGridPfHvResultTransformerT as TransformerTResult,\ + EgoGridPfHvResultMeta as ResultMeta + else: + print('Please enter mv or hv!') + + # get last result id and get new one + last_res_id = session.query(func.max(ResultMeta.result_id)).scalar() + if last_res_id == None: + new_res_id = 1 + else: + new_res_id = last_res_id + 1 + + # result meta data + res_meta = ResultMeta() + meta_misc = [] + for arg, value in args.items(): + if arg not in dir(res_meta) and arg not in ['db','lpfile','results','export']: + meta_misc.append([arg,str(value)]) + + res_meta.result_id=new_res_id + res_meta.scn_name=args['scn_name'] + res_meta.calc_date= datetime.datetime.now() + res_meta.method=args['method'] + res_meta.gridversion = args['gridversion'] + res_meta.start_snapshot = args['start_snapshot'] + res_meta.end_snapshot = args['end_snapshot'] + res_meta.snapshots = network.snapshots.tolist() + res_meta.solver = args['solver'] + res_meta.branch_capacity_factor = args['branch_capacity_factor'] + res_meta.pf_post_lopf = args['pf_post_lopf'] + res_meta.network_clustering = args['network_clustering'] + res_meta.storage_extendable = args['storage_extendable'] + res_meta.load_shedding = args['load_shedding'] + res_meta.generator_noise = args['generator_noise'] + res_meta.minimize_loading=args['minimize_loading'] + res_meta.k_mean_clustering=args['k_mean_clustering'] + res_meta.parallelisation=args['parallelisation'] + res_meta.line_grouping=args['line_grouping'] + res_meta.misc=meta_misc + res_meta.comments=args['comments'] + + session.add(res_meta) + session.commit() + + # new result bus + + for col in network.buses_t.v_mag_pu: + res_bus = BusResult() + + res_bus.result_id = new_res_id + res_bus.bus_id = col + try: + res_bus.x = network.buses.x[col] + except: + res_bus.x = None + try: + res_bus.y = network.buses.y[col] + except: + res_bus.y = None + try: + res_bus.v_nom = network.buses.v_nom[col] + except: + res_bus.v_nom = None + try: + res_bus.current_type=network.buses.carrier[col] + except: + res_bus.current_type=None + try: + res_bus.v_mag_pu_min = network.buses.v_mag_pu_min[col] + except: + res_bus.v_mag_pu_min = None + try: + res_bus.v_mag_pu_max = network.buses.v_mag_pu_max[col] + except: + res_bus.v_mag_pu_max = None + try: + res_bus.geom = network.buses.geom[col] + except: + res_bus.geom = None + session.add(res_bus) + session.commit() + +# not working yet since ego.io classes are not yet iterable +# for col in network.buses_t.v_mag_pu: +# res_bus = BusResult() +# res_bus.result_id = new_res_id +# res_bus.bus_id = col +# for var in dir(res_bus): +# if not var.startswith('_') and var not in ('result_id','bus_id'): +# try: +# res_bus.var = 3 #network.buses.var[col] +# except: +# raise ValueError('WRONG') +# session.add(res_bus) +# session.commit() + + + # new result bus_t + for col in network.buses_t.v_mag_pu: + res_bus_t = BusTResult() + + res_bus_t.result_id = new_res_id + res_bus_t.bus_id = col + try: + res_bus_t.p = network.buses_t.p[col].tolist() + except: + res_bus_t.p = None + try: + res_bus_t.q = network.buses_t.q[col].tolist() + except: + res_bus_t.q = None + try: + res_bus_t.v_mag_pu = network.buses_t.v_mag_pu[col].tolist() + except: + res_bus_t.v_mag_pu = None + try: + res_bus_t.v_ang = network.buses_t.v_ang[col].tolist() + except: + res_bus_t.v_ang = None + try: + res_bus_t.marginal_price = 
network.buses_t.marginal_price[col].tolist() + except: + res_bus_t.marginal_price = None + + session.add(res_bus_t) + session.commit() + + + # generator results + for col in network.generators_t.p: + res_gen = GeneratorResult() + res_gen.result_id = new_res_id + res_gen.generator_id = col + res_gen.bus = int(network.generators.bus[col]) + try: + res_gen.dispatch = network.generators.former_dispatch[col] + except: + res_gen.dispatch = None + try: + res_gen.control = network.generators.control[col] + except: + res_gen.control = None + try: + res_gen.p_nom = network.generators.p_nom[col] + except: + res_gen.p_nom = None + try: + res_gen.p_nom_extendable = bool(network.generators.p_nom_extendable[col]) + except: + res_gen.p_nom_extendable = None + try: + res_gen.p_nom_min = network.generators.p_nom_min[col] + except: + res_gen.p_nom_min = None + try: + res_gen.p_nom_max = network.generators.p_nom_max[col] + except: + res_gen.p_nom_max = None + try: + res_gen.p_min_pu_fixed = network.generators.p_min_pu[col] + except: + res_gen.p_min_pu_fixed = None + try: + res_gen.p_max_pu_fixed = network.generators.p_max_pu[col] + except: + res_gen.p_max_pu_fixed = None + try: + res_gen.sign = network.generators.sign[col] + except: + res_gen.sign = None +# try: +# res_gen.source = network.generators.carrier[col] +# except: +# res_gen.source = None + try: + res_gen.marginal_cost = network.generators.marginal_cost[col] + except: + res_gen.marginal_cost = None + try: + res_gen.capital_cost = network.generators.capital_cost[col] + except: + res_gen.capital_cost = None + try: + res_gen.efficiency = network.generators.efficiency[col] + except: + res_gen.efficiency = None + try: + res_gen.p_nom_opt = network.generators.p_nom_opt[col] + except: + res_gen.p_nom_opt = None + session.add(res_gen) + session.commit() + + # generator_t results + for col in network.generators_t.p: + res_gen_t = GeneratorTResult() + res_gen_t.result_id = new_res_id + res_gen_t.generator_id = col + try: + res_gen_t.p_set = network.generators_t.p_set[col].tolist() + except: + res_gen_t.p_set = None + try: + res_gen_t.q_set = network.generators_t.q_set[col].tolist() + except: + res_gen_t.q_set = None + try: + res_gen_t.p_min_pu = network.generators_t.p_min_pu[col].tolist() + except: + res_gen_t.p_min_pu = None + try: + res_gen_t.p_max_pu = network.generators_t.p_max_pu[col].tolist() + except: + res_gen_t.p_max_pu = None + try: + res_gen_t.p = network.generators_t.p[col].tolist() + except: + res_gen_t.p = None + try: + res_gen_t.q = network.generators_t.q[col].tolist() + except: + res_gen_t.q = None + try: + res_gen_t.status = network.generators_t.status[col].tolist() + except: + res_gen_t.status = None + session.add(res_gen_t) + session.commit() + + + # line results + for col in network.lines_t.p0: + res_line = LineResult() + res_line.result_id=new_res_id, + res_line.line_id=col + res_line.bus0=int(network.lines.bus0[col]) + res_line.bus1=int(network.lines.bus1[col]) + try: + res_line.x = network.lines.x[col] + except: + res_line.x = None + try: + res_line.r = network.lines.r[col] + except: + res_line.r = None + try: + res_line.g = network.lines.g[col] + except: + res_line.g = None + try: + res_line.b = network.lines.b[col] + except: + res_line.b = None + try: + res_line.s_nom = network.lines.s_nom[col] + except: + res_line.s_nom = None + try: + res_line.s_nom_extendable = bool(network.lines.s_nom_extendable[col]) + except: + res_line.s_nom_extendable = None + try: + res_line.s_nom_min = network.lines.s_nom_min[col] + except: + 
res_line.s_nom_min = None + try: + res_line.s_nom_max = network.lines.s_nom_max[col] + except: + res_line.s_nom_max = None + try: + res_line.capital_cost = network.lines.capital_cost[col] + except: + res_line.capital_cost = None + try: + res_line.length = network.lines.length[col] + except: + res_line.length = None + try: + res_line.cables = int(network.lines.cables[col]) + except: + res_line.cables = None + try: + res_line.frequency = network.lines.frequency[col] + except: + res_line.frequency = None + try: + res_line.terrain_factor = network.lines.terrain_factor[col] + except: + res_line.terrain_factor = None + try: + res_line.x_pu = network.lines.x_pu[col] + except: + res_line.x_pu = None + try: + res_line.r_pu = network.lines.r_pu[col] + except: + res_line.r_pu = None + try: + res_line.g_pu = network.lines.g_pu[col] + except: + res_line.g_pu = None + try: + res_line.b_pu = network.lines.b_pu[col] + except: + res_line.b_pu = None + try: + res_line.s_nom_opt = network.lines.s_nom_opt[col] + except: + res_line.s_nom_opt = None + try: + res_line.geom = network.lines.geom[col] + except: + res_line.geom = None + try: + res_line.topo = network.lines.topo[col] + except: + res_line.topo = None + session.add(res_line) + session.commit() + + + # line_t results + for col in network.lines_t.p0: + res_line_t = LineTResult() + res_line_t.result_id=new_res_id, + res_line_t.line_id=col + try: + res_line_t.p0 = network.lines_t.p0[col].tolist() + except: + res_line_t.p0 = None + try: + res_line_t.q0 = network.lines_t.q0[col].tolist() + except: + res_line_t.q0 = None + try: + res_line_t.p1 = network.lines_t.p1[col].tolist() + except: + res_line_t.p1 = None + try: + res_line_t.q1 = network.lines_t.q1[col].tolist() + except: + res_line_t.q1 = None + session.add(res_line_t) + session.commit() + + + # load results + for col in network.loads_t.p: + res_load = LoadResult() + res_load.result_id=new_res_id, + res_load.load_id=col + res_load.bus = int(network.loads.bus[col]) + try: + res_load.sign = network.loads.sign[col] + except: + res_load.sign = None + try: + res_load.e_annual = network.loads.e_annual[col] + except: + res_load.e_annual = None + session.add(res_load) + session.commit() + + # load_t results + for col in network.loads_t.p: + res_load_t = LoadTResult() + res_load_t.result_id=new_res_id, + res_load_t.load_id=col + try: + res_load_t.p_set = network.loads_t.p_set[col].tolist() + except: + res_load_t.p_set = None + try: + res_load_t.q_set = network.loads_t.q_set[col].tolist() + except: + res_load_t.q_set = None + try: + res_load_t.p = network.loads_t.p[col].tolist() + except: + res_load_t.p = None + try: + res_load_t.q = network.loads_t.q[col].tolist() + except: + res_load_t.q = None + session.add(res_load_t) + session.commit() + + + # insert results of transformers + + for col in network.transformers_t.p0: + res_transformer = TransformerResult() + res_transformer.result_id=new_res_id + res_transformer.trafo_id=col + res_transformer.bus0=int(network.transformers.bus0[col]) + res_transformer.bus1=int(network.transformers.bus1[col]) + try: + res_transformer.x = network.transformers.x[col] + except: + res_transformer.x = None + try: + res_transformer.r = network.transformers.r[col] + except: + res_transformer.r = None + try: + res_transformer.g = network.transformers.g[col] + except: + res_transformer.g = None + try: + res_transformer.b = network.transformers.b[col] + except: + res_transformer.b = None + try: + res_transformer.s_nom = network.transformers.s_nom[col] + except: + res_transformer.s_nom = 
None + try: + res_transformer.s_nom_extendable = bool(network.transformers.s_nom_extendable[col]) + except: + res_transformer.s_nom_extendable = None + try: + res_transformer.s_nom_min = network.transformers.s_nom_min[col] + except: + res_transformer.s_nom_min = None + try: + res_transformer.s_nom_max = network.transformers.s_nom_max[col] + except: + res_transformer.s_nom_max = None + try: + res_transformer.tap_ratio = network.transformers.tap_ratio[col] + except: + res_transformer.tap_ratio = None + try: + res_transformer.phase_shift = network.transformers.phase_shift[col] + except: + res_transformer.phase_shift = None + try: + res_transformer.capital_cost = network.transformers.capital_cost[col] + except: + res_transformer.capital_cost = None + try: + res_transformer.x_pu = network.transformers.x_pu[col] + except: + res_transformer.x_pu = None + try: + res_transformer.r_pu = network.transformers.r_pu[col] + except: + res_transformer.r_pu = None + try: + res_transformer.g_pu = network.transformers.g_pu[col] + except: + res_transformer.g_pu = None + try: + res_transformer.b_pu = network.transformers.b_pu[col] + except: + res_transformer.b_pu = None + try: + res_transformer.s_nom_opt = network.transformers.s_nom_opt[col] + except: + res_transformer.s_nom_opt = None + try: + res_transformer.geom = network.transformers.geom[col] + except: + res_transformer.geom = None + try: + res_transformer.topo = network.transformers.topo[col] + except: + res_transformer.topo = None + session.add(res_transformer) + session.commit() + + # insert results of transformers_t + for col in network.transformers_t.p0: + res_transformer_t = TransformerTResult() + res_transformer_t.result_id=new_res_id + res_transformer_t.trafo_id=col + try: + res_transformer_t.p0 = network.transformers_t.p0[col].tolist() + except: + res_transformer_t.p0 = None + try: + res_transformer_t.q0 = network.transformers_t.q0[col].tolist() + except: + res_transformer_t.q0 = None + try: + res_transformer_t.p1 = network.transformers_t.p1[col].tolist() + except: + res_transformer_t.p1 = None + try: + res_transformer_t.q1 = network.transformers_t.q1[col].tolist() + except: + res_transformer_t.q1 = None + session.add(res_transformer_t) + session.commit() + + + + # storage_units results + + for col in network.storage_units_t.p: + res_sto = StorageResult() + res_sto.result_id=new_res_id, + res_sto.storage_id=col, + res_sto.bus=int(network.storage_units.bus[col]) + try: + res_sto.dispatch = network.storage_units.dispatch[col] + except: + res_sto.dispatch = None + try: + res_sto.control = network.storage_units.control[col] + except: + res_sto.control = None + try: + res_sto.p_nom = network.storage_units.p_nom[col] + except: + res_sto.p_nom = None + try: + res_sto.p_nom_extendable = bool(network.storage_units.p_nom_extendable[col]) + except: + res_sto.p_nom_extendable = None + try: + res_sto.p_nom_min = network.storage_units.p_nom_min[col] + except: + res_sto.p_nom_min = None + try: + res_sto.p_nom_max = network.storage_units.p_nom_max[col] + except: + res_sto.p_nom_max = None + try: + res_sto.p_min_pu_fixed = network.storage_units.p_min_pu[col] + except: + res_sto.p_min_pu_fixed = None + try: + res_sto.p_max_pu_fixed = network.storage_units.p_max_pu[col] + except: + res_sto.p_max_pu_fixed = None + try: + res_sto.sign = network.storage_units.sign[col] + except: + res_sto.sign = None +# try: +# res_sto.source = network.storage_units.carrier[col] +# except: +# res_sto.source = None + try: + res_sto.marginal_cost = 
network.storage_units.marginal_cost[col]
+        except:
+            res_sto.marginal_cost = None
+        try:
+            res_sto.capital_cost = network.storage_units.capital_cost[col]
+        except:
+            res_sto.capital_cost = None
+        try:
+            res_sto.efficiency = network.storage_units.efficiency[col]
+        except:
+            res_sto.efficiency = None
+        try:
+            res_sto.soc_initial = network.storage_units.state_of_charge_initial[col]
+        except:
+            res_sto.soc_initial = None
+        try:
+            res_sto.soc_cyclic = bool(network.storage_units.cyclic_state_of_charge[col])
+        except:
+            res_sto.soc_cyclic = None
+        try:
+            res_sto.max_hours = network.storage_units.max_hours[col]
+        except:
+            res_sto.max_hours = None
+        try:
+            res_sto.efficiency_store = network.storage_units.efficiency_store[col]
+        except:
+            res_sto.efficiency_store = None
+        try:
+            res_sto.efficiency_dispatch = network.storage_units.efficiency_dispatch[col]
+        except:
+            res_sto.efficiency_dispatch = None
+        try:
+            res_sto.standing_loss = network.storage_units.standing_loss[col]
+        except:
+            res_sto.standing_loss = None
+        try:
+            res_sto.p_nom_opt = network.storage_units.p_nom_opt[col]
+        except:
+            res_sto.p_nom_opt = None
+        session.add(res_sto)
+        session.commit()
+
+    # storage_units_t results
+    for col in network.storage_units_t.p:
+        res_sto_t = StorageTResult()
+        res_sto_t.result_id=new_res_id,
+        res_sto_t.storage_id=col,
+        try:
+            res_sto_t.p_set = network.storage_units_t.p_set[col].tolist()
+        except:
+            res_sto_t.p_set = None
+        try:
+            res_sto_t.q_set = network.storage_units_t.q_set[col].tolist()
+        except:
+            res_sto_t.q_set = None
+        try:
+            res_sto_t.p_min_pu = network.storage_units_t.p_min_pu[col].tolist()
+        except:
+            res_sto_t.p_min_pu = None
+        try:
+            res_sto_t.p_max_pu = network.storage_units_t.p_max_pu[col].tolist()
+        except:
+            res_sto_t.p_max_pu = None
+        try:
+            res_sto_t.soc_set = network.storage_units_t.state_of_charge_set[col].tolist()
+        except:
+            res_sto_t.soc_set = None
+        try:
+            res_sto_t.inflow = network.storage_units_t.inflow[col].tolist()
+        except:
+            res_sto_t.inflow = None
+        try:
+            res_sto_t.p = network.storage_units_t.p[col].tolist()
+        except:
+            res_sto_t.p = None
+        try:
+            res_sto_t.q = network.storage_units_t.q[col].tolist()
+        except:
+            res_sto_t.q = None
+        try:
+            res_sto_t.state_of_charge = network.storage_units_t.state_of_charge[col].tolist()
+        except:
+            res_sto_t.state_of_charge = None
+        try:
+            res_sto_t.spill = network.storage_units_t.spill[col].tolist()
+        except:
+            res_sto_t.spill = None
+        session.add(res_sto_t)
+        session.commit()
+
+
+
+if __name__ == '__main__':
+    if pypsa.__version__ not in ['0.6.2', '0.8.0']:
+        print('Pypsa version %s not supported.' % pypsa.__version__)
+    pass
diff --git a/etrago/tools/plot.py b/etrago/tools/plot.py
new file mode 100644
index 00000000..7bb47669
--- /dev/null
+++ b/etrago/tools/plot.py
@@ -0,0 +1,689 @@
+"""
+Plot.py defines functions necessary to plot results of eTraGo.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation; either version 3 of the
+License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +""" + +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "ulfmueller, MarlonSchlemminger, mariusves, lukasol" + +from math import sqrt +from geoalchemy2.shape import to_shape +from matplotlib import pyplot as plt +import pandas as pd +import numpy as np +import time +import matplotlib + + +def add_coordinates(network): + """ + Add coordinates to nodes based on provided geom + + Parameters + ---------- + network : PyPSA network container + + Returns + ------- + Altered PyPSA network container ready for plotting + """ + for idx, row in network.buses.iterrows(): + wkt_geom = to_shape(row['geom']) + network.buses.loc[idx, 'x'] = wkt_geom.x + network.buses.loc[idx, 'y'] = wkt_geom.y + + return network + +def plot_line_loading(network, timestep=0, filename=None, boundaries=[], + arrows=False): + """ + Plot line loading as color on lines + + Displays line loading relative to nominal capacity + Parameters + ---------- + network : PyPSA network container + Holds topology of grid including results from powerflow analysis + filename : str + Specify filename + If not given, figure will be show directly + """ + # TODO: replace p0 by max(p0,p1) and analogously for q0 + # TODO: implement for all given snapshots + + # calculate relative line loading as S/S_nom + # with S = sqrt(P^2 + Q^2) + x = time.time() + cmap = plt.cm.jet + if network.lines_t.q0.empty: + loading_c = (network.lines_t.p0.loc[network.snapshots[timestep]]/ \ + (network.lines.s_nom)) * 100 + loading = abs(loading_c) + else: + loading = ((network.lines_t.p0.loc[network.snapshots[timestep]] ** 2 + + network.lines_t.q0.loc[network.snapshots[timestep]] ** 2).\ + apply(sqrt) / (network.lines.s_nom)) * 100 + + # do the plotting + ll = network.plot(line_colors=abs(loading), line_cmap=cmap, + title="Line loading", line_widths=0.55) + + # add colorbar, note mappable sliced from ll by [1] + + if not boundaries: + cb = plt.colorbar(ll[1]) + elif boundaries: + v = np.linspace(boundaries[0], boundaries[1], 101) + cb = plt.colorbar(ll[1], boundaries=v, + ticks=v[0:101:10]) + cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) + + cb.set_label('Line loading in %') + +#============================================================================== +# x, y, u, v = np.zeros((4, 10)) +# path = ll[1].get_segments() +# for i in range(0, len(x)): +# x[i] = path[i][0][0] +# y[i] = path[i][0][1] +# u[i] = path[i][1][0] - path[i][0][0] +# v[i] = path[i][1][1] - path[i][0][1] +# plt.quiver(x, y, u, v, scale=1, units="xy") +# plt.axis('equal') +# plt.grid() +#============================================================================== + + if arrows: + ax = plt.axes() + path = ll[1].get_segments() + x_coords_lines = np.zeros([len(path)]) + cmap = cmap + colors = cmap(ll[1].get_array()/100) + for i in range(0, len(path)): + x_coords_lines[i] = network.buses.loc[str(network.lines.iloc[i, 2]),'x'] + color = colors[i] + if (x_coords_lines[i] == path[i][0][0] and loading[i] >= 0)\ + or (x_coords_lines[i] != path[i][0][0] and loading[i] < 0): + arrowprops = dict(arrowstyle="<-", color=color) + else: + arrowprops = dict(arrowstyle="->", color=color) + ax.annotate("", + xy=abs((path[i][0] - path[i][1]) * 0.51 - path[i][0]), + xytext=abs((path[i][0] - path[i][1]) * 0.49 - path[i][0]), + arrowprops=arrowprops, + size=10 + ) + 
+#==============================================================================
+#     ax = plt.axes()
+#     for i in range(0, 10):
+#         ax.arrow(x = ll[1].get_segments()[i][0][0],
+#                  y = ll[1].get_segments()[i][0][1],
+#                  dx = ll[1].get_segments()[i][1][0] - ll[1].get_segments()[i][0][0],
+#                  dy = ll[1].get_segments()[i][1][1] - ll[1].get_segments()[i][0][1]
+#                  )
+#==============================================================================
+
+    if filename is None:
+        plt.show()
+    else:
+        plt.savefig(filename)
+        plt.close()
+
+    y = time.time()
+    z = (y-x)/60
+    print(z)
+
+
+def plot_line_loading_diff(networkA, networkB, timestep=0):
+    """
+    Plot difference in line loading between two networks
+    (with and without switches) as color on lines
+
+    Positive values mean that line loading with switches is larger than
+    without. Switches are plotted as small dots.
+
+    Parameters
+    ----------
+    networkA : PyPSA network container
+        Holds topology of grid with switches
+        including results from powerflow analysis
+    networkB : PyPSA network container
+        Holds topology of grid without switches
+        including results from powerflow analysis
+    filename : str
+        Specify filename
+        If not given, figure will be shown directly
+    timestep : int
+        timestep to show, default is 0
+    """
+
+    # new colormap to make sure 0% difference has the same color in every plot
+    def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
+        '''
+        Function to offset the "center" of a colormap. Useful for
+        data with a negative min and a positive max when you want the
+        middle of the colormap's dynamic range to be at zero.
+
+        Input
+        -----
+        cmap : The matplotlib colormap to be altered
+        start : Offset from the lowest point in the colormap's range.
+            Defaults to 0.0 (no lower offset). Should be between
+            0.0 and `midpoint`.
+        midpoint : The new center of the colormap. Defaults to
+            0.5 (no shift). Should be between 0.0 and 1.0. In
+            general, this should be 1 - vmax/(vmax + abs(vmin)).
+            For example, if your data range from -15.0 to +5.0 and
+            you want the center of the colormap at 0.0, `midpoint`
+            should be set to 1 - 5/(5 + 15) or 0.75.
+        stop : Offset from the highest point in the colormap's range.
+            Defaults to 1.0 (no upper offset). Should be between
+            `midpoint` and 1.0.
+        '''
+        cdict = {
+            'red': [],
+            'green': [],
+            'blue': [],
+            'alpha': []
+        }
+
+        # regular index to compute the colors
+        reg_index = np.linspace(start, stop, 257)
+
+        # shifted index to match the data
+        shift_index = np.hstack([
+            np.linspace(0.0, midpoint, 128, endpoint=False),
+            np.linspace(midpoint, 1.0, 129, endpoint=True)
+        ])
+
+        for ri, si in zip(reg_index, shift_index):
+            r, g, b, a = cmap(ri)
+
+            cdict['red'].append((si, r, r))
+            cdict['green'].append((si, g, g))
+            cdict['blue'].append((si, b, b))
+            cdict['alpha'].append((si, a, a))
+
+        newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
+        plt.register_cmap(cmap=newcmap)
+
+        return newcmap
+
+    # calculate difference in loading between both networks
+    loading_switches = abs(networkA.lines_t.p0.loc[networkA.snapshots[timestep]].to_frame())
+    loading_switches.columns = ['switch']
+    loading_noswitches = abs(networkB.lines_t.p0.loc[networkB.snapshots[timestep]].to_frame())
+    loading_noswitches.columns = ['noswitch']
+    diff_network = loading_switches.join(loading_noswitches)
+    diff_network['noswitch'] = diff_network['noswitch'].fillna(diff_network['switch'])
+    diff_network[networkA.snapshots[timestep]] = diff_network['switch']-diff_network['noswitch']
+
+    # get switches
+    new_buses = pd.Series(index=networkA.buses.index.values)
+    new_buses.loc[set(networkA.buses.index.values)-set(networkB.buses.index.values)] = 0.1
+    new_buses = new_buses.fillna(0)
+
+    # plot network with difference in loading and shifted colormap
+    loading = (diff_network.loc[:, networkA.snapshots[timestep]]/ \
+               (networkA.lines.s_nom)) * 100
+    midpoint = 1 - max(loading)/(max(loading) + abs(min(loading)))
+    shifted_cmap = shiftedColorMap(plt.cm.jet, midpoint=midpoint, name='shifted')
+    ll = networkA.plot(line_colors=loading, line_cmap=shifted_cmap,
+                       title="Line loading", bus_sizes=new_buses,
+                       bus_colors='blue', line_widths=0.55)
+
+    cb = plt.colorbar(ll[1])
+    cb.set_label('Difference in line loading in % of s_nom')
+
+
+def plot_residual_load(network):
+    """ Plots the residual load summed over all existing buses.
+
+    Parameters
+    ----------
+    network : PyPSA network container
+    """
+
+    renewables = network.generators[
+        network.generators.dispatch == 'variable']
+    renewables_t = network.generators.p_nom[renewables.index] * \
+        network.generators_t.p_max_pu[renewables.index]
+    load = network.loads_t.p_set.sum(axis=1)
+    all_renew = renewables_t.sum(axis=1)
+    residual_load = load - all_renew
+    residual_load.plot(drawstyle='steps', lw=2, color='red', legend='residual load')
+    # sorted curve
+    sorted_residual_load = residual_load.sort_values(
+        ascending=False).reset_index()
+    sorted_residual_load.plot(drawstyle='steps', lw=1.4, color='red')
+
+
+def plot_stacked_gen(network, bus=None, resolution='GW', filename=None):
+    """
+    Plot stacked sum of generation grouped by carrier type
+
+
+    Parameters
+    ----------
+    network : PyPSA network container
+    bus: string
+        Plot all generators at one specific bus. If none,
+        the sum is calculated over all buses
+    resolution: string
+        Unit for y-axis. Can be either GW/MW/KW
+
+    Returns
+    -------
+    Plot
+    """
+    if resolution == 'GW':
+        reso_int = 1e3
+    elif resolution == 'MW':
+        reso_int = 1
+    elif resolution == 'KW':
+        reso_int = 0.001
+
+    # sum for all buses
+    if bus==None:
+        p_by_carrier = pd.concat([network.generators_t.p
+                       [network.generators[network.generators.control!='Slack'].index],
+                       network.generators_t.p[network.generators[network.
+                       generators.control=='Slack'].index].iloc[:,0].
+ apply(lambda x: x if x > 0 else 0)], axis=1).\ + groupby(network.generators.carrier, axis=1).sum() + load = network.loads_t.p.sum(axis=1) + if hasattr(network, 'foreign_trade'): + trade_sum = network.foreign_trade.sum(axis=1) + p_by_carrier['imports'] = trade_sum[trade_sum > 0] + p_by_carrier['imports'] = p_by_carrier['imports'].fillna(0) + # sum for a single bus + elif bus is not None: + filtered_gens = network.generators[network.generators['bus'] == bus] + p_by_carrier = network.generators_t.p.\ + groupby(filtered_gens.carrier, axis=1).sum() + filtered_load = network.loads[network.loads['bus'] == bus] + load = network.loads_t.p[filtered_load.index] + + colors = {'biomass':'green', + 'coal':'k', + 'gas':'orange', + 'eeg_gas':'olive', + 'geothermal':'purple', + 'lignite':'brown', + 'oil':'darkgrey', + 'other_non_renewable':'pink', + 'reservoir':'navy', + 'run_of_river':'aqua', + 'pumped_storage':'steelblue', + 'solar':'yellow', + 'uranium':'lime', + 'waste':'sienna', + 'wind':'skyblue', + 'slack':'pink', + 'load shedding': 'red', + 'nan':'m', + 'imports':'salmon'} + +# TODO: column reordering based on available columns + + fig,ax = plt.subplots(1,1) + + fig.set_size_inches(12,6) + colors = [colors[col] for col in p_by_carrier.columns] + if len(colors) == 1: + colors = colors[0] + (p_by_carrier/reso_int).plot(kind="area",ax=ax,linewidth=0, + color=colors) + (load/reso_int).plot(ax=ax, legend='load', lw=2, color='darkgrey', style='--') + ax.legend(ncol=4,loc="upper left") + + ax.set_ylabel(resolution) + ax.set_xlabel("") + + if filename is None: + plt.show() + else: + plt.savefig(filename) + plt.close() + + +def plot_gen_diff(networkA, networkB, leave_out_carriers=['geothermal', 'oil', + 'other_non_renewable', + 'reservoir', 'waste']): + """ + Plot difference in generation between two networks grouped by carrier type + + + Parameters + ---------- + networkA : PyPSA network container with switches + networkB : PyPSA network container without switches + leave_out_carriers : list of carriers to leave out (default to all small + carriers) + + Returns + ------- + Plot + """ + def gen_by_c(network): + gen = pd.concat([network.generators_t.p + [network.generators[network.generators.control!='Slack'].index], + network.generators_t.p[network.generators[network. + generators.control=='Slack'].index].iloc[:,0]. 
+ apply(lambda x: x if x > 0 else 0)], axis=1).\ + groupby(network.generators.carrier, axis=1).sum() + return gen + + gen = gen_by_c(networkB) + gen_switches = gen_by_c(networkA) + diff = gen_switches-gen + + colors = {'biomass':'green', + 'coal':'k', + 'gas':'orange', + 'eeg_gas':'olive', + 'geothermal':'purple', + 'lignite':'brown', + 'oil':'darkgrey', + 'other_non_renewable':'pink', + 'reservoir':'navy', + 'run_of_river':'aqua', + 'pumped_storage':'steelblue', + 'solar':'yellow', + 'uranium':'lime', + 'waste':'sienna', + 'wind':'skyblue', + 'slack':'pink', + 'load shedding': 'red', + 'nan':'m'} + diff.drop(leave_out_carriers, axis=1, inplace=True) + colors = [colors[col] for col in diff.columns] + + plot = diff.plot(kind='line', color=colors, use_index=False) + plot.legend(loc='upper left', ncol=5, prop={'size': 8}) + x = [] + for i in range(0, len(diff)): + x.append(i) + plt.xticks(x, x) + plot.set_xlabel('Timesteps') + plot.set_ylabel('Difference in Generation in MW') + plot.set_title('Difference in Generation') + plt.tight_layout() + +def curtailment(network, carrier='wind', filename=None): + + p_by_carrier = network.generators_t.p.groupby(network.generators.carrier, axis=1).sum() + capacity = network.generators.groupby("carrier").sum().at[carrier,"p_nom"] + p_available = network.generators_t.p_max_pu.multiply(network.generators["p_nom"]) + p_available_by_carrier =p_available.groupby(network.generators.carrier, axis=1).sum() + p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier + p_df = pd.DataFrame({carrier + " available" : p_available_by_carrier[carrier], + carrier + " dispatched" : p_by_carrier[carrier], + carrier + " curtailed" : p_curtailed_by_carrier[carrier]}) + + p_df[carrier + " capacity"] = capacity + p_df[carrier + " curtailed"][p_df[carrier + " curtailed"] < 0.] = 0. + + + fig,ax = plt.subplots(1,1) + fig.set_size_inches(12,6) + p_df[[carrier + " dispatched",carrier + " curtailed"]].plot(kind="area",ax=ax,linewidth=3) + p_df[[carrier + " available",carrier + " capacity"]].plot(ax=ax,linewidth=3) + + ax.set_xlabel("") + ax.set_ylabel("Power [MW]") + ax.set_ylim([0,capacity*1.1]) + ax.legend() + if filename is None: + plt.show() + else: + plt.savefig(filename) + plt.close() + +def storage_distribution(network, filename=None): + """ + Plot storage distribution as circles on grid nodes + + Displays storage size and distribution in network. + Parameters + ---------- + network : PyPSA network container + Holds topology of grid including results from powerflow analysis + filename : str + Specify filename + If not given, figure will be show directly + """ + + stores = network.storage_units + storage_distribution = network.storage_units.p_nom_opt[stores.index].groupby(network.storage_units.bus).sum().reindex(network.buses.index,fill_value=0.) 
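+    # Editor's note: the line above uses the recurring per-bus aggregation
+    # idiom of this module -- group a component quantity by its bus, sum it,
+    # and reindex against all buses so buses without storage get size zero.
+    # The same pattern yields other bubble plots, e.g. (illustrative only):
+    #
+    #   peak_load = network.loads_t.p_set.max().groupby(network.loads.bus)\
+    #       .sum().reindex(network.buses.index, fill_value=0.)
+    #   network.plot(bus_sizes=0.001 * peak_load)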
+ + fig,ax = plt.subplots(1,1) + fig.set_size_inches(6,6) + + if sum(storage_distribution) == 0: + network.plot(bus_sizes=0,ax=ax,title="No extendable storage") + else: + network.plot(bus_sizes=storage_distribution,ax=ax,line_widths=0.3,title="Storage distribution") + + if filename is None: + plt.show() + else: + plt.savefig(filename) + plt.close() + + +def gen_dist(network, techs=None, snapshot=0, n_cols=3,gen_size=0.2, filename=None): + + """ + Generation distribution + ---------- + network : PyPSA network container + Holds topology of grid including results from powerflow analysis + techs : dict + type of technologies which shall be plotted + snapshot : int + snapshot + n_cols : int + number of columns of the plot + gen_size : num + size of generation bubbles at the buses + filename : str + Specify filename + If not given, figure will be show directly + """ + if techs is None: + techs = network.generators.carrier.unique() + else: + techs = techs + + n_graphs = len(techs) + n_cols = n_cols + + if n_graphs % n_cols == 0: + n_rows = n_graphs // n_cols + else: + n_rows = n_graphs // n_cols + 1 + + + fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols) + + size = 4 + + fig.set_size_inches(size*n_cols,size*n_rows) + + for i,tech in enumerate(techs): + i_row = i // n_cols + i_col = i % n_cols + + ax = axes[i_row,i_col] + + gens = network.generators[network.generators.carrier == tech] + gen_distribution = network.generators_t.p[gens.index].\ + loc[network.snapshots[snapshot]].groupby(network.generators.bus).sum().\ + reindex(network.buses.index,fill_value=0.) + + + network.plot(ax=ax,bus_sizes=gen_size*gen_distribution, line_widths=0.1) + + ax.set_title(tech) + if filename is None: + plt.show() + else: + plt.savefig(filename) +plt.close() + +def gen_dist_diff(networkA, networkB, techs=None, snapshot=0, n_cols=3,gen_size=0.2, filename=None, buscmap=plt.cm.jet): + + """ + Difference in generation distribution + Green/Yellow/Red colors mean that the generation at a location is bigger with switches + than without + Blue colors mean that the generation at a location is smaller with switches + than without + ---------- + networkA : PyPSA network container + Holds topology of grid with switches + including results from powerflow analysis + networkB : PyPSA network container + Holds topology of grid without switches + including results from powerflow analysis + techs : dict + type of technologies which shall be plotted + snapshot : int + snapshot + n_cols : int + number of columns of the plot + gen_size : num + size of generation bubbles at the buses + filename : str + Specify filename + If not given, figure will be show directly + """ + if techs is None: + techs = networkA.generators.carrier.unique() + else: + techs = techs + + n_graphs = len(techs) + n_cols = n_cols + + if n_graphs % n_cols == 0: + n_rows = n_graphs // n_cols + else: + n_rows = n_graphs // n_cols + 1 + + + fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols) + + size = 4 + + fig.set_size_inches(size*n_cols,size*n_rows) + + for i,tech in enumerate(techs): + i_row = i // n_cols + i_col = i % n_cols + + ax = axes[i_row,i_col] + + gensA = networkA.generators[networkA.generators.carrier == tech] + gensB = networkB.generators[networkB.generators.carrier == tech] + + gen_distribution = networkA.generators_t.p[gensA.index].\ + loc[networkA.snapshots[snapshot]].groupby(networkA.generators.bus).sum().\ + reindex(networkA.buses.index,fill_value=0.) 
- networkB.generators_t.p[gensB.index].\
+                           loc[networkB.snapshots[snapshot]].groupby(networkB.generators.bus).sum().\
+                           reindex(networkB.buses.index,fill_value=0.)
+
+        networkA.plot(ax=ax,bus_sizes=gen_size*abs(gen_distribution),
+                      bus_colors=gen_distribution, line_widths=0.1, bus_cmap=buscmap)
+
+        ax.set_title(tech)
+
+
+    if filename is None:
+        plt.show()
+    else:
+        plt.savefig(filename)
+plt.close()
+
+def gen_dist(network, techs=None, snapshot=1, n_cols=3,gen_size=0.2, filename=None):
+
+    """
+    Generation distribution
+
+    ----------
+    network : PyPSA network container
+        Holds topology of grid including results from powerflow analysis
+    techs : dict
+        type of technologies which shall be plotted
+    snapshot : int
+        snapshot
+    n_cols : int
+        number of columns of the plot
+    gen_size : num
+        size of generation bubbles at the buses
+    filename : str
+        Specify filename
+        If not given, figure will be shown directly
+    """
+    if techs is None:
+        techs = network.generators.carrier.unique()
+    else:
+        techs = techs
+
+    n_graphs = len(techs)
+    n_cols = n_cols
+
+    if n_graphs % n_cols == 0:
+        n_rows = n_graphs // n_cols
+    else:
+        n_rows = n_graphs // n_cols + 1
+
+
+    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols)
+
+    size = 4
+
+    fig.set_size_inches(size*n_cols,size*n_rows)
+
+    for i,tech in enumerate(techs):
+        i_row = i // n_cols
+        i_col = i % n_cols
+
+        ax = axes[i_row,i_col]
+
+        gens = network.generators[network.generators.carrier == tech]
+        gen_distribution = network.generators_t.p[gens.index].\
+                           loc[network.snapshots[snapshot]].groupby(network.generators.bus).sum().\
+                           reindex(network.buses.index,fill_value=0.)
+
+
+
+        network.plot(ax=ax,bus_sizes=gen_size*gen_distribution, line_widths=0.1)
+
+        ax.set_title(tech)
+    if filename is None:
+        plt.show()
+    else:
+        plt.savefig(filename)
+        plt.close()
+
+
+
+
+if __name__ == '__main__':
+    pass
diff --git a/etrago/plots/snapshot_clustering.py b/etrago/tools/snapshot_clustering.py
similarity index 100%
rename from etrago/plots/snapshot_clustering.py
rename to etrago/tools/snapshot_clustering.py
diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py
new file mode 100644
index 00000000..02320ecc
--- /dev/null
+++ b/etrago/tools/utilities.py
@@ -0,0 +1,527 @@
+"""
+Utilities.py defines functions necessary to apply eTraGo.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation; either version 3 of the
+License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +""" + +__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems" +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" + + +from sqlalchemy.orm import sessionmaker +from sqlalchemy import create_engine +import pandas as pd +import numpy as np +import os +import time +from pyomo.environ import (Var,Constraint, PositiveReals,ConcreteModel) + +def oedb_session(section='oedb'): + """Get SQLAlchemy session object with valid connection to OEDB""" + + # get session object by oemof.db tools (requires .oemof/config.ini + try: + from oemof import db + conn = db.connection(section=section) + + except: + print('Please provide connection parameters to database:') + + host = input('host (default 127.0.0.1): ') or '127.0.0.1' + port = input('port (default 5432): ') or '5432' + user = input('user (default postgres): ') or 'postgres' + database = input('database name: ') + password = input('password: ') + + conn = create_engine( + 'postgresql://' + '%s:%s@%s:%s/%s' % (user, + password, + host, + port, + database)) + + Session = sessionmaker(bind=conn) + session = Session() + return session + + +def buses_of_vlvl(network, voltage_level): + """ Get bus-ids of given voltage level(s). + + Parameters + ---------- + network : :class:`pypsa.Network + Overall container of PyPSA + voltage_level: list + + Returns + ------- + list + List containing bus-ids. + """ + + mask = network.buses.v_nom.isin(voltage_level) + df = network.buses[mask] + + return df.index + + +def buses_grid_linked(network, voltage_level): + """ Get bus-ids of a given voltage level connected to the grid. + + Parameters + ---------- + network : :class:`pypsa.Network + Overall container of PyPSA + voltage_level: list + + Returns + ------- + list + List containing bus-ids. + """ + + mask = ((network.buses.index.isin(network.lines.bus0) | + (network.buses.index.isin(network.lines.bus1))) & + (network.buses.v_nom.isin(voltage_level))) + + df = network.buses[mask] + + return df.index + + +def clip_foreign(network): + """ + Delete all components and timelines located outside of Germany. + Add transborder flows divided by country of origin as network.foreign_trade. 
+
+
+def clip_foreign(network):
+    """
+    Delete all components and timelines located outside of Germany.
+    Add transborder flows divided by country of origin as
+    network.foreign_trade.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+
+    Returns
+    -------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    """
+
+    # get foreign buses by country, identified by coordinate bounding boxes
+    poland = pd.Series(index=network.buses[(network.buses['x'] > 17)].index,
+                       data="Poland")
+    czech = pd.Series(index=network.buses[(network.buses['x'] < 17) &
+                                          (network.buses['x'] > 15.1)].index,
+                      data="Czech")
+    denmark = pd.Series(index=network.buses[((network.buses['y'] < 60) &
+                                             (network.buses['y'] > 55.2)) |
+                                            ((network.buses['x'] > 11.95) &
+                                             (network.buses['x'] < 11.97) &
+                                             (network.buses['y'] > 54.5))].index,
+                        data="Denmark")
+    sweden = pd.Series(index=network.buses[(network.buses['y'] > 60)].index,
+                       data="Sweden")
+    austria = pd.Series(index=network.buses[(network.buses['y'] < 47.33) &
+                                            (network.buses['x'] > 9) |
+                                            ((network.buses['x'] > 9.65) &
+                                             (network.buses['x'] < 9.9) &
+                                             (network.buses['y'] < 47.5) &
+                                             (network.buses['y'] > 47.3)) |
+                                            ((network.buses['x'] > 12.14) &
+                                             (network.buses['x'] < 12.15) &
+                                             (network.buses['y'] > 47.57) &
+                                             (network.buses['y'] < 47.58)) |
+                                            (network.buses['y'] < 47.6) &
+                                            (network.buses['x'] > 14.1)].index,
+                        data="Austria")
+    switzerland = pd.Series(index=network.buses[((network.buses['x'] > 8.1) &
+                                                 (network.buses['x'] < 8.3) &
+                                                 (network.buses['y'] < 46.8)) |
+                                                ((network.buses['x'] > 7.82) &
+                                                 (network.buses['x'] < 7.88) &
+                                                 (network.buses['y'] > 47.54) &
+                                                 (network.buses['y'] < 47.57)) |
+                                                ((network.buses['x'] > 10.91) &
+                                                 (network.buses['x'] < 10.92) &
+                                                 (network.buses['y'] > 49.91) &
+                                                 (network.buses['y'] < 49.92))].index,
+                            data="Switzerland")
+    netherlands = pd.Series(index=network.buses[((network.buses['x'] < 6.96) &
+                                                 (network.buses['y'] < 53.15) &
+                                                 (network.buses['y'] > 53.1)) |
+                                                ((network.buses['x'] < 5.4) &
+                                                 (network.buses['y'] > 52.1))].index,
+                            data="Netherlands")
+    luxembourg = pd.Series(index=network.buses[((network.buses['x'] < 6.15) &
+                                                (network.buses['y'] < 49.91) &
+                                                (network.buses['y'] > 49.65))].index,
+                           data="Luxembourg")
+    france = pd.Series(index=network.buses[(network.buses['x'] < 4.5) |
+                                           ((network.buses['x'] > 7.507) &
+                                            (network.buses['x'] < 7.508) &
+                                            (network.buses['y'] > 47.64) &
+                                            (network.buses['y'] < 47.65)) |
+                                           ((network.buses['x'] > 6.2) &
+                                            (network.buses['x'] < 6.3) &
+                                            (network.buses['y'] > 49.1) &
+                                            (network.buses['y'] < 49.2)) |
+                                           ((network.buses['x'] > 6.7) &
+                                            (network.buses['x'] < 6.76) &
+                                            (network.buses['y'] > 49.13) &
+                                            (network.buses['y'] < 49.16))].index,
+                       data="France")
+    foreign_buses = pd.Series()
+    foreign_buses = foreign_buses.append([poland, czech, denmark, sweden,
+                                          austria, switzerland, netherlands,
+                                          luxembourg, france])
+
+    network.buses = network.buses.drop(foreign_buses.index)
+
+    # identify transborder lines (one bus foreign, one bus not) and the
+    # country they are coming from
+    transborder_lines = pd.DataFrame(index=network.lines[
+        (~network.lines['bus0'].isin(network.buses.index) &
+         network.lines['bus1'].isin(network.buses.index)) |
+        (network.lines['bus0'].isin(network.buses.index) &
+         ~network.lines['bus1'].isin(network.buses.index))].index)
+    transborder_lines['bus0'] = network.lines['bus0']
+    transborder_lines['bus1'] = network.lines['bus1']
+    transborder_lines['country'] = ""
+    for i in range(0, len(transborder_lines)):
+        if transborder_lines.iloc[i, 0] in foreign_buses.index:
+            transborder_lines.loc[transborder_lines.index[i], 'country'] = \
+                foreign_buses[str(transborder_lines.iloc[i, 0])]
+        else:
+            transborder_lines.loc[transborder_lines.index[i], 'country'] = \
+                foreign_buses[str(transborder_lines.iloc[i, 1])]
+
+    # identify amount of flows per line and group to get flow per country;
+    # the sign is normalised so that positive values denote imports to Germany
+    transborder_flows = network.lines_t.p0[transborder_lines.index]
+    for i in transborder_flows.columns:
+        if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:
+            transborder_flows.loc[:, str(i)] = \
+                transborder_flows.loc[:, str(i)] * -1
+
+    network.foreign_trade = transborder_flows.\
+        groupby(transborder_lines['country'], axis=1).sum()
+
+    # drop foreign components
+    network.lines = network.lines.drop(network.lines[
+        ~network.lines['bus0'].isin(network.buses.index) |
+        ~network.lines['bus1'].isin(network.buses.index)].index)
+    network.transformers = network.transformers.drop(network.transformers[
+        ~network.transformers['bus0'].isin(network.buses.index) |
+        ~network.transformers['bus1'].isin(network.buses.index)].index)
+    network.generators = network.generators.drop(network.generators[
+        ~network.generators['bus'].isin(network.buses.index)].index)
+    network.loads = network.loads.drop(network.loads[
+        ~network.loads['bus'].isin(network.buses.index)].index)
+    network.storage_units = network.storage_units.drop(network.storage_units[
+        ~network.storage_units['bus'].isin(network.buses.index)].index)
+
+    # drop the time series columns belonging to the dropped components
+    components = ['loads', 'generators', 'lines', 'buses', 'transformers']
+    for g in components:
+        nw = getattr(network, g + '_t')    # e.g. network.loads_t
+        for i in nw.keys():                # e.g. network.loads_t.p
+            cols = [j for j in getattr(nw, i).columns
+                    if j not in getattr(network, g).index]
+            for k in cols:
+                del getattr(nw, i)[k]
+
+    return network
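+
+# Sketch: clip a Europe-wide network to Germany and inspect the exchange
+# (positive foreign_trade values denote imports to Germany):
+#
+#     network = clip_foreign(network)
+#     print(network.foreign_trade.sum())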
+
+
+def connected_grid_lines(network, busids):
+    """ Get grid lines connected to given buses.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    busids : list
+        List containing bus-ids.
+
+    Returns
+    -------
+    :class:`pandas.DataFrame`
+        PyPSA lines.
+    """
+
+    mask = network.lines.bus1.isin(busids) |\
+        network.lines.bus0.isin(busids)
+
+    return network.lines[mask]
+
+
+def connected_transformer(network, busids):
+    """ Get transformers connected to given buses.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    busids : list
+        List containing bus-ids.
+
+    Returns
+    -------
+    :class:`pandas.DataFrame`
+        PyPSA transformers.
+    """
+
+    mask = (network.transformers.bus0.isin(busids))
+
+    return network.transformers[mask]
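+
+# Sketch: collect the components attached to one voltage level (380 kV is
+# just an example value):
+#
+#     ehv_buses = buses_of_vlvl(network, [380.])
+#     lines = connected_grid_lines(network, ehv_buses)
+#     trafos = connected_transformer(network, ehv_buses)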
+
+
+def load_shedding(network, **kwargs):
+    """ Implement load shedding in existing network to identify
+    feasibility problems.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    marginal_cost : int
+        Marginal costs for load shedding
+    p_nom : int
+        Installed capacity of load shedding generator
+    """
+
+    marginal_cost_def = 10000  # e.g. network.generators.marginal_cost.max()*2
+    p_nom_def = network.loads_t.p_set.max().max()
+
+    marginal_cost = kwargs.get('marginal_cost', marginal_cost_def)
+    p_nom = kwargs.get('p_nom', p_nom_def)
+
+    # add one load shedding generator per bus; the carrier name has to match
+    # the carrier assigned to the generators below
+    network.add("Carrier", "load shedding")
+    start = network.generators.index.astype(int).max() + 1
+    index = list(range(start, start + len(network.buses.index)))
+    network.import_components_from_dataframe(
+        pd.DataFrame(
+            dict(marginal_cost=marginal_cost,
+                 p_nom=p_nom,
+                 carrier='load shedding',
+                 bus=network.buses.index),
+            index=index),
+        "Generator"
+    )
+    return
+
+
+def data_manipulation_sh(network):
+    """ Add a 220 kV bus at Siems and connect it to the grid near Luebeck
+    (Schleswig-Holstein test case)."""
+    from shapely.geometry import Point, LineString, MultiLineString
+    from geoalchemy2.shape import from_shape, to_shape
+
+    # add connection from Luebeck to Siems
+    new_bus = str(int(network.buses.index.max()) + 1)
+    new_trafo = str(int(network.transformers.index.max()) + 1)
+    new_line = str(int(network.lines.index.max()) + 1)
+    network.add("Bus", new_bus, carrier='AC', v_nom=220,
+                x=10.760835, y=53.909745)
+    network.add("Transformer", new_trafo, bus0="25536", bus1=new_bus,
+                x=1.29960, tap_ratio=1, s_nom=1600)
+    network.add("Line", new_line, bus0="26387", bus1=new_bus,
+                x=0.0001, s_nom=1600)
+    network.lines.loc[new_line, 'cables'] = 3.0
+
+    # bus geom
+    point_bus1 = Point(10.760835, 53.909745)
+    network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326))
+
+    # line geom/topo
+    network.lines.set_value(new_line, 'geom', from_shape(
+        MultiLineString([LineString(
+            [to_shape(network.buses.geom['26387']), point_bus1])]), 4326))
+    network.lines.set_value(new_line, 'topo', from_shape(LineString(
+        [to_shape(network.buses.geom['26387']), point_bus1]), 4326))
+
+    # trafo geom/topo
+    network.transformers.set_value(new_trafo, 'geom', from_shape(
+        MultiLineString([LineString(
+            [to_shape(network.buses.geom['25536']), point_bus1])]), 4326))
+    network.transformers.set_value(new_trafo, 'topo', from_shape(LineString(
+        [to_shape(network.buses.geom['25536']), point_bus1]), 4326))
+
+    return
+
+
+def results_to_csv(network, path):
+    """ Export network results to a csv folder and record the solver time
+    in network.csv."""
+    if not path:
+        return None
+
+    os.makedirs(path, exist_ok=True)
+
+    network.export_to_csv_folder(path)
+    data = pd.read_csv(os.path.join(path, 'network.csv'))
+    data['time'] = network.results['Solver'].Time
+    data.to_csv(os.path.join(path, 'network.csv'))
+
+    if hasattr(network, 'Z'):
+        # the impedance matrix is shared by all snapshot subfolders
+        file = [i for i in os.listdir(path.strip('0123456789'))
+                if i == 'Z.csv']
+        if file:
+            print('Z already calculated')
+        else:
+            network.Z.to_csv(path.strip('0123456789') + '/Z.csv',
+                             index=False)
+
+    return
+
+
+def parallelisation(network, start_h, end_h, group_size, solver_name,
+                    extra_functionality=None):
+    """ Solve the linear OPF sequentially in groups of snapshots."""
+
+    print("Performing linear OPF, {} snapshot(s) at a time:".
+          format(group_size))
+    x = time.time()
+    for i in range(int((end_h - start_h + 1) / group_size)):
+        network.lopf(
+            network.snapshots[group_size * i:group_size * i + group_size],
+            solver_name=solver_name,
+            extra_functionality=extra_functionality)
+
+    y = time.time()
+    z = (y - x) / 60
+    print("Time for solving [min]:", round(z, 2))
+    return
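+
+# Sketch: solve one week hour-by-hour in daily chunks ('gurobi' is only an
+# example; any solver available to Pyomo works):
+#
+#     parallelisation(network, start_h=1, end_h=168, group_size=24,
+#                     solver_name='gurobi')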
+
+
+def pf_post_lopf(network, scenario):
+    """ Run a non-linear power flow on the optimised dispatch, moving the
+    slack to the largest suitable PV-controlled generator."""
+
+    # note: this is a reference, not a copy of the network
+    network_pf = network
+
+    # for the PF, set the P of the generators to the optimised P
+    network_pf.generators_t.p_set = network_pf.generators_t.p_set.reindex(
+        columns=network_pf.generators.index)
+    network_pf.generators_t.p_set = network_pf.generators_t.p
+
+    # find the bus with the highest dispatched generation to host the
+    # new slack generator
+    old_slack = network.generators.index[
+        network.generators.control == 'Slack'][0]
+    old_gens = network.generators
+    gens_summed = network.generators_t.p.sum()
+    old_gens['p_summed'] = gens_summed
+    max_gen_buses_index = old_gens.groupby(['bus']).agg(
+        {'p_summed': np.sum}).p_summed.sort_values().index
+
+    for bus_iter in range(1, len(max_gen_buses_index) - 1):
+        if old_gens[(network.generators['bus'] ==
+                     max_gen_buses_index[-bus_iter]) &
+                    (network.generators['control'] == 'PV')].empty:
+            continue
+        else:
+            new_slack_bus = max_gen_buses_index[-bus_iter]
+            break
+
+    network.generators = network.generators.drop('p_summed', 1)
+    new_slack_gen = network.generators.p_nom[
+        (network.generators['bus'] == new_slack_bus) &
+        (network.generators['control'] == 'PV')].sort_values().index[-1]
+
+    # check if old slack was PV or PQ control:
+    if network.generators.p_nom[old_slack] > 50 and \
+            network.generators.carrier[old_slack] in ('solar', 'wind'):
+        old_control = 'PQ'
+    elif network.generators.p_nom[old_slack] > 50 and \
+            network.generators.carrier[old_slack] not in ('solar', 'wind'):
+        old_control = 'PV'
+    else:
+        old_control = 'PQ'
+
+    network.generators = network.generators.set_value(
+        old_slack, 'control', old_control)
+    network.generators = network.generators.set_value(
+        new_slack_gen, 'control', 'Slack')
+
+    # execute non-linear pf
+    network_pf.pf(scenario.timeindex, use_seed=True)
+
+    return network_pf
+
+
+def calc_line_losses(network):
+    """ Calculate losses per line with PF result data.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    """
+
+    # line losses
+    # calculate apparent power S = sqrt(p² + q²) [in MW]
+    s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2).
+                apply(np.sqrt))
+    # calculate current I = S / U [in A]; v_nom is attached to the lines
+    # by the eTraGo data import (it is not a PyPSA default attribute)
+    i0_lines = np.multiply(s0_lines, 1000000) / \
+        np.multiply(network.lines.v_nom, 1000)
+    # calculate losses per line and timestep: losses = I² * R [in MW]
+    network.lines_t.losses = np.divide(i0_lines**2 * network.lines.r,
+                                       1000000)
+    # calculate total losses per line [in MW]
+    network.lines = network.lines.assign(
+        losses=np.sum(network.lines_t.losses).values)
+
+    # transformer losses
+    # https://books.google.de/books?id=0glcCgAAQBAJ&pg=PA151&lpg=PA151&dq=wirkungsgrad+transformator+1000+mva&source=bl&ots=a6TKhNfwrJ&sig=r2HCpHczRRqdgzX_JDdlJo4hj-k&hl=de&sa=X&ved=0ahUKEwib5JTFs6fWAhVJY1AKHa1cAeAQ6AEIXjAI#v=onepage&q=wirkungsgrad%20transformator%201000%20mva&f=false
+    # Crastan, Elektrische Energieversorgung, p. 151
+    # trafo 1000 MVA: 99.8 % efficiency
+    network.transformers = network.transformers.assign(
+        losses=np.multiply(network.transformers.s_nom, (1 - 0.998)).values)
+
+    # calculate total losses (possibly enhance by adding these values to
+    # the network container)
+    losses_total = sum(network.lines.losses) + sum(network.transformers.losses)
+    print("Total line losses for all snapshots [MW]:",
+          round(losses_total, 2))
+    losses_costs = losses_total * np.average(network.buses_t.marginal_price)
+    print("Total costs for these losses [EUR]:", round(losses_costs, 2))
+
+    return
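+
+# Sketch: losses need reactive flows, i.e. a non-linear PF first (`scenario`
+# is assumed to be the eTraGo scenario object providing timeindex):
+#
+#     network = pf_post_lopf(network, scenario)
+#     calc_line_losses(network)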
+
+
+def loading_minimization(network, snapshots):
+    """ Extra functionality for lopf: minimise the summed absolute loading
+    of all passive branches."""
+
+    # number1 - number2 equals the branch flow; since both variables are
+    # positive and their sum is minimised, number1 + number2 approximates
+    # the absolute flow |p|
+    network.model.number1 = Var(network.model.passive_branch_p_index,
+                                within=PositiveReals)
+    network.model.number2 = Var(network.model.passive_branch_p_index,
+                                within=PositiveReals)
+
+    def cRule(model, c, l, t):
+        return (model.number1[c, l, t] - model.number2[c, l, t] ==
+                model.passive_branch_p[c, l, t])
+
+    network.model.cRule = Constraint(network.model.passive_branch_p_index,
+                                     rule=cRule)
+
+    network.model.objective.expr += 0.00001 * sum(
+        network.model.number1[i] + network.model.number2[i]
+        for i in network.model.passive_branch_p_index)
+
+
+def group_parallel_lines(network):
+    """ Aggregate parallel lines between identical bus pairs into one
+    equivalent line."""
+
+    # ordering of buses: (not sure if still necessary, remaining from
+    # SQL code)
+    old_lines = network.lines
+
+    for line in old_lines.index:
+        bus0_new = str(old_lines.loc[line, ['bus0', 'bus1']].astype(int).min())
+        bus1_new = str(old_lines.loc[line, ['bus0', 'bus1']].astype(int).max())
+        old_lines.set_value(line, 'bus0', bus0_new)
+        old_lines.set_value(line, 'bus1', bus1_new)
+
+    # saving the old index
+    old_lines['old_index'] = network.lines.index
+
+    grouped = old_lines.groupby(['bus0', 'bus1'])
+
+    # calculating electrical properties for parallel lines: admittances and
+    # ratings add up directly, impedances add up reciprocally
+    grouped_agg = grouped.agg(
+        {'b': np.sum,
+         'b_pu': np.sum,
+         'cables': np.sum,
+         'capital_cost': np.min,
+         'frequency': np.mean,
+         'g': np.sum,
+         'g_pu': np.sum,
+         'geom': lambda x: x.iloc[0],
+         'length': lambda x: x.min(),
+         'num_parallel': np.sum,
+         'r': lambda x: np.reciprocal(np.sum(np.reciprocal(x))),
+         'r_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))),
+         's_nom': np.sum,
+         's_nom_extendable': lambda x: x.min(),
+         's_nom_max': np.sum,
+         's_nom_min': np.sum,
+         's_nom_opt': np.sum,
+         'scn_name': lambda x: x.min(),
+         'sub_network': lambda x: x.min(),
+         'terrain_factor': lambda x: x.min(),
+         'topo': lambda x: x.iloc[0],
+         'type': lambda x: x.min(),
+         'v_ang_max': lambda x: x.min(),
+         'v_ang_min': lambda x: x.min(),
+         'x': lambda x: np.reciprocal(np.sum(np.reciprocal(x))),
+         'x_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))),
+         'old_index': np.min})
+
+    for i in range(0, len(grouped_agg.index)):
+        grouped_agg.set_value(grouped_agg.index[i], 'bus0',
+                              grouped_agg.index[i][0])
+        grouped_agg.set_value(grouped_agg.index[i], 'bus1',
+                              grouped_agg.index[i][1])
+
+    new_lines = grouped_agg.set_index(grouped_agg.old_index)
+    new_lines = new_lines.drop('old_index', 1)
+    network.lines = new_lines
+
+    return
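+
+# Sketch: hook the loading minimisation into the solve via PyPSA's
+# extra_functionality(network, snapshots) interface:
+#
+#     network.lopf(network.snapshots, solver_name='gurobi',
+#                  extra_functionality=loading_minimization)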
diff --git a/requirements.txt b/requirements.txt
index 74e99cef..b076ad49 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,7 @@
-# use pip install -r requirements.txt to setup your virtualenv
-
-# ego.powerflow release 0.0.5
-egopowerflow==0.0.5
+# Packages for Read the Docs
+sphinx_rtd_theme
-# ego.io release 0.2.11
-egoio==0.2.11
+# use pip install -r requirements.txt to set up your virtualenv
 
 # eGo PyPSA fork on dev https://github.com/openego/PyPSA/tree/dev
 -e git+https://github.com/openego/PyPSA.git@dev#egg=PyPSA
diff --git a/setup.py b/setup.py
index 880233f3..8094d655 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, Next Energy"
+__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems"
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
 __author__ = "mariusves"
@@ -6,14 +6,25 @@ from setuptools import find_packages, setup
 setup(name='eTraGo',
-      author='NEXT ENERGY, ZNES Flensburg',
+      author='DLR VE, ZNES Flensburg',
       author_email='',
       description='electrical Transmission Grid Optimization of flexibility options for transmission grids based on PyPSA',
-      version='0.3',
-      url='https://github.com/openego/eTraGo',
+      version='0.4',
+      url='https://github.com/openego/eTraGo',
       license="GNU Affero General Public License Version 3 (AGPL-3.0)",
       packages=find_packages(),
+      include_package_data=True,
       install_requires=['egoio == 0.2.11',
-                        'egopowerflow == 0.0.5'],
-      dependency_links=['git+ssh://git@github.com/openego/PyPSA.git@dev#egg=PyPSA']
+                        'scikit-learn == 0.19.0',
+                        'pandas >= 0.17.0, <= 0.19.1',
+                        'pypsa == 0.8.0',
+                        'sqlalchemy >= 1.0.15, <= 1.1.4',
+                        'oemof.db == 0.0.4',
+                        'geoalchemy2 >= 0.3.0, <= 0.4.0',
+                        'matplotlib == 1.5.3'],
+      dependency_links=['git+ssh://git@github.com/openego/PyPSA.git@dev#egg=PyPSA'],
+      extras_require={
+          'docs': [
+              'sphinx >= 1.4',
+              'sphinx_rtd_theme']}
 )