diff --git a/src/egon/data/airflow/dags/pipeline.py b/src/egon/data/airflow/dags/pipeline.py index a1e10a6ab..03f96b9c0 100644 --- a/src/egon/data/airflow/dags/pipeline.py +++ b/src/egon/data/airflow/dags/pipeline.py @@ -49,6 +49,11 @@ from egon.data.datasets.heat_etrago import HeatEtrago from egon.data.datasets.heat_etrago.hts_etrago import HtsEtragoTable from egon.data.datasets.heat_supply import HeatSupply +from egon.data.datasets.heat_supply.individual_heating import ( + HeatPumps2050, + HeatPumpsPypsaEurSec, + HeatPumps2035, +) from egon.data.datasets.hydrogen_etrago import ( HydrogenBusEtrago, HydrogenGridEtrago, @@ -329,6 +334,24 @@ ] ) + cts_demand_buildings = CtsDemandBuildings( + dependencies=[ + osm_buildings_streets, + cts_electricity_demand_annual, + hh_demand_buildings_setup, + tasks["heat_demand_timeseries.export-etrago-cts-heat-profiles"], + ] + ) + + # Minimum heat pump capacity for pypsa-eur-sec + heat_pumps_pypsa_eur_sec = HeatPumpsPypsaEurSec( + dependencies=[ + cts_demand_buildings, + DistrictHeatingAreas, + heat_time_series, + ] + ) + # run pypsa-eur-sec run_pypsaeursec = PypsaEurSec( dependencies=[ @@ -339,6 +362,7 @@ data_bundle, electrical_load_etrago, heat_time_series, + heat_pumps_pypsa_eur_sec, ] ) @@ -579,12 +603,23 @@ dependencies=[vg250, setup_etrago, create_gas_polygons_egon2035] ) - cts_demand_buildings = CtsDemandBuildings( + # Heat pump disaggregation for eGon2035 + heat_pumps_2035 = HeatPumps2035( dependencies=[ - osm_buildings_streets, - cts_electricity_demand_annual, - hh_demand_buildings_setup, - tasks["heat_demand_timeseries.export-etrago-cts-heat-profiles"], + cts_demand_buildings, + DistrictHeatingAreas, + heat_supply, + heat_time_series, + heat_pumps_pypsa_eur_sec, + tasks["power_plants.pv-rooftop-to-buildings"] + ] + ) + + # Heat pump disaggregation for eGon100RE + heat_pumps_2050 = HeatPumps2050( + dependencies=[ + run_pypsaeursec, + heat_pumps_pypsa_eur_sec, ] ) diff --git a/src/egon/data/datasets.yml b/src/egon/data/datasets.yml index dd2311cf3..ba358d08f 100755 --- a/src/egon/data/datasets.yml +++ b/src/egon/data/datasets.yml @@ -1090,6 +1090,9 @@ emobility_mit: export_results_to_csv: True parallel_tasks: 10 +demand_timeseries_mvgd: + parallel_tasks: 10 + charging_infrastructure: original_data: sources: diff --git a/src/egon/data/datasets/electricity_demand_timeseries/cts_buildings.py b/src/egon/data/datasets/electricity_demand_timeseries/cts_buildings.py index f37709d2e..76344c985 100644 --- a/src/egon/data/datasets/electricity_demand_timeseries/cts_buildings.py +++ b/src/egon/data/datasets/electricity_demand_timeseries/cts_buildings.py @@ -955,7 +955,6 @@ def calc_building_amenity_share(df_cts_buildings): def calc_cts_building_profiles( - egon_building_ids, bus_ids, scenario, sector, @@ -966,8 +965,6 @@ def calc_cts_building_profiles( Parameters ---------- - egon_building_ids: list of int - Ids of the building for which the profile is calculated. bus_ids: list of int Ids of the substation for which selected building profiles are calculated. @@ -979,7 +976,9 @@ def calc_cts_building_profiles( Returns ------- df_building_profiles: pd.DataFrame - Table of demand profile per building + Table of demand profile per building. Column names are building IDs and index + is hour of the year as int (0-8759). 
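+
+    Examples
+    --------
+    A minimal usage sketch; it requires a database connection and the bus
+    ID is made up::
+
+        df = calc_cts_building_profiles(
+            bus_ids=[12345], scenario="eGon2035", sector="heat"
+        )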
+ """ if sector == "electricity": # Get cts building electricity demand share of selected buildings @@ -992,8 +991,8 @@ def calc_cts_building_profiles( EgonCtsElectricityDemandBuildingShare.scenario == scenario ) .filter( - EgonCtsElectricityDemandBuildingShare.building_id.in_( - egon_building_ids + EgonCtsElectricityDemandBuildingShare.bus_id.in_( + bus_ids ) ) ) @@ -1029,8 +1028,8 @@ def calc_cts_building_profiles( ) .filter(EgonCtsHeatDemandBuildingShare.scenario == scenario) .filter( - EgonCtsHeatDemandBuildingShare.building_id.in_( - egon_building_ids + EgonCtsHeatDemandBuildingShare.bus_id.in_( + bus_ids ) ) ) diff --git a/src/egon/data/datasets/electricity_demand_timeseries/tools.py b/src/egon/data/datasets/electricity_demand_timeseries/tools.py index 4ccabe603..fe32d4bb0 100644 --- a/src/egon/data/datasets/electricity_demand_timeseries/tools.py +++ b/src/egon/data/datasets/electricity_demand_timeseries/tools.py @@ -1,5 +1,6 @@ from io import StringIO import csv +import time from shapely.geometry import Point import geopandas as gpd @@ -11,6 +12,23 @@ engine = db.engine() +def timeit(func): + """ + Decorator for measuring function's running time. + """ + + def measure_time(*args, **kw): + start_time = time.time() + result = func(*args, **kw) + print( + "Processing time of %s(): %.2f seconds." + % (func.__qualname__, time.time() - start_time) + ) + return result + + return measure_time + + def random_point_in_square(geom, tol): """ Generate a random point within a square @@ -174,6 +192,8 @@ def write_table_to_postgres( if drop: db_table.__table__.drop(bind=engine, checkfirst=True) db_table.__table__.create(bind=engine) + else: + db_table.__table__.create(bind=engine, checkfirst=True) df.to_sql( name=db_table.__table__.name, diff --git a/src/egon/data/datasets/heat_demand_timeseries/daily.py b/src/egon/data/datasets/heat_demand_timeseries/daily.py index bbbff15bc..9217bd9f8 100644 --- a/src/egon/data/datasets/heat_demand_timeseries/daily.py +++ b/src/egon/data/datasets/heat_demand_timeseries/daily.py @@ -32,7 +32,7 @@ class EgonDailyHeatDemandPerClimateZone(Base): climate_zone = Column(Text, primary_key=True) day_of_year = Column(Integer, primary_key=True) temperature_class = Column(Integer) - heat_demand_share = Column(Float(53)) + daily_demand_share = Column(Float(53)) def temperature_classes(): diff --git a/src/egon/data/datasets/heat_supply/__init__.py b/src/egon/data/datasets/heat_supply/__init__.py index fd356ef86..55511419c 100644 --- a/src/egon/data/datasets/heat_supply/__init__.py +++ b/src/egon/data/datasets/heat_supply/__init__.py @@ -176,8 +176,7 @@ def __init__(self, dependencies): create_tables, { district_heating, - # Temporary drop everything related to rural heat - # individual_heating, + individual_heating, potential_germany, }, ), diff --git a/src/egon/data/datasets/heat_supply/individual_heating.py b/src/egon/data/datasets/heat_supply/individual_heating.py index 2aa442e07..18b1ed6bf 100644 --- a/src/egon/data/datasets/heat_supply/individual_heating.py +++ b/src/egon/data/datasets/heat_supply/individual_heating.py @@ -2,9 +2,260 @@ individual heat supply. 
""" +from pathlib import Path +import os +import random +import time + +from airflow.operators.python_operator import PythonOperator +from psycopg2.extensions import AsIs, register_adapter +from sqlalchemy import ARRAY, REAL, Column, Integer, String +from sqlalchemy.ext.declarative import declarative_base import geopandas as gpd +import numpy as np import pandas as pd -from egon.data import config, db +import saio + +from egon.data import config, db, logger +from egon.data.datasets import Dataset +from egon.data.datasets.district_heating_areas import ( + MapZensusDistrictHeatingAreas, +) +from egon.data.datasets.electricity_demand_timeseries.cts_buildings import ( + calc_cts_building_profiles, +) +from egon.data.datasets.electricity_demand_timeseries.mapping import ( + EgonMapZensusMvgdBuildings, +) +from egon.data.datasets.electricity_demand_timeseries.tools import ( + write_table_to_postgres, +) +from egon.data.datasets.heat_demand import EgonPetaHeat +from egon.data.datasets.heat_demand_timeseries.daily import ( + EgonDailyHeatDemandPerClimateZone, + EgonMapZensusClimateZones, +) +from egon.data.datasets.heat_demand_timeseries.idp_pool import ( + EgonHeatTimeseries, +) + +# get zensus cells with district heating +from egon.data.datasets.zensus_mv_grid_districts import MapZensusGridDistricts + +engine = db.engine() +Base = declarative_base() + + +class EgonEtragoTimeseriesIndividualHeating(Base): + __tablename__ = "egon_etrago_timeseries_individual_heating" + __table_args__ = {"schema": "demand"} + bus_id = Column(Integer, primary_key=True) + scenario = Column(String, primary_key=True) + carrier = Column(String, primary_key=True) + dist_aggregated_mw = Column(ARRAY(REAL)) + + +class EgonHpCapacityBuildings(Base): + __tablename__ = "egon_hp_capacity_buildings" + __table_args__ = {"schema": "demand"} + building_id = Column(Integer, primary_key=True) + scenario = Column(String, primary_key=True) + hp_capacity = Column(REAL) + + +class HeatPumpsPypsaEurSec(Dataset): + def __init__(self, dependencies): + def dyn_parallel_tasks(): + """Dynamically generate tasks + + The goal is to speed up tasks by parallelising bulks of mvgds. + + The number of parallel tasks is defined via parameter + `parallel_tasks` in the dataset config `datasets.yml`. + + Returns + ------- + set of airflow.PythonOperators + The tasks. Each element is of + :func:`egon.data.datasets.heat_supply.individual_heating. 
+                determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec`
+            """
+            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
+                "parallel_tasks", 1
+            )
+            # ========== Register np datatypes with SQLA ==========
+            register_adapter(np.float64, adapt_numpy_float64)
+            register_adapter(np.int64, adapt_numpy_int64)
+            # =====================================================
+
+            with db.session_scope() as session:
+                query = (
+                    session.query(
+                        MapZensusGridDistricts.bus_id,
+                    )
+                    .filter(
+                        MapZensusGridDistricts.zensus_population_id
+                        == EgonPetaHeat.zensus_population_id
+                    )
+                    .distinct(MapZensusGridDistricts.bus_id)
+                )
+                mvgd_ids = pd.read_sql(
+                    query.statement, query.session.bind, index_col=None
+                )
+
+            mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)
+
+            mvgd_ids = np.array_split(
+                mvgd_ids["bus_id"].values, parallel_tasks
+            )
+
+            # mvgd_bunch_size = divmod(MVGD_MIN_COUNT, parallel_tasks)[0]
+            tasks = set()
+            for i, bulk in enumerate(mvgd_ids):
+                tasks.add(
+                    PythonOperator(
+                        task_id=(
+                            f"determine-hp-capacity-pypsa-eur-sec_"
+                            f"mvgd_{min(bulk)}-{max(bulk)}"
+                        ),
+                        python_callable=determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec,
+                        op_kwargs={
+                            "mvgd_ids": bulk,
+                        },
+                    )
+                )
+            return tasks
+
+        super().__init__(
+            name="HeatPumpsPypsaEurSec",
+            version="0.0.0",
+            dependencies=dependencies,
+            tasks=(
+                create_peak_load_table,
+                create_egon_etrago_timeseries_individual_heating,
+                {*dyn_parallel_tasks()},
+            ),
+        )
+
+
+class HeatPumps2035(Dataset):
+    def __init__(self, dependencies):
+        def dyn_parallel_tasks():
+            """Dynamically generate tasks
+
+            The goal is to speed up tasks by parallelising bulks of mvgds.
+
+            The number of parallel tasks is defined via parameter
+            `parallel_tasks` in the dataset config `datasets.yml`.
+
+            Returns
+            -------
+            set of airflow.PythonOperators
+                The tasks. Each element is of
+                :func:`egon.data.datasets.heat_supply.individual_heating.
+                determine_hp_cap_peak_load_mvgd_ts_2035`
+            """
+            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
+                "parallel_tasks", 1
+            )
+            # ========== Register np datatypes with SQLA ==========
+            register_adapter(np.float64, adapt_numpy_float64)
+            register_adapter(np.int64, adapt_numpy_int64)
+            # =====================================================
+
+            with db.session_scope() as session:
+                query = (
+                    session.query(
+                        MapZensusGridDistricts.bus_id,
+                    )
+                    .filter(
+                        MapZensusGridDistricts.zensus_population_id
+                        == EgonPetaHeat.zensus_population_id
+                    )
+                    .distinct(MapZensusGridDistricts.bus_id)
+                )
+                mvgd_ids = pd.read_sql(
+                    query.statement, query.session.bind, index_col=None
+                )
+
+            mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)
+
+            mvgd_ids = np.array_split(
+                mvgd_ids["bus_id"].values, parallel_tasks
+            )
+
+            # mvgd_bunch_size = divmod(MVGD_MIN_COUNT, parallel_tasks)[0]
+            tasks = set()
+            for i, bulk in enumerate(mvgd_ids):
+                tasks.add(
+                    PythonOperator(
+                        task_id=(
+                            f"determine-hp-capacity-eGon2035_"
+                            f"mvgd_{min(bulk)}-{max(bulk)}"
+                        ),
+                        python_callable=determine_hp_cap_peak_load_mvgd_ts_2035,
+                        op_kwargs={
+                            "mvgd_ids": bulk,
+                        },
+                    )
+                )
+            return tasks
+
+        super().__init__(
+            name="HeatPumps2035",
+            version="0.0.0",
+            dependencies=dependencies,
+            tasks=(
+                create_hp_capacity_table,
+                delete_peak_loads_if_existing,
+                {*dyn_parallel_tasks()},
+            ),
+        )
+
+
+class HeatPumps2050(Dataset):
+    def __init__(self, dependencies):
+        super().__init__(
+            name="HeatPumps2050",
+            version="0.0.0",
+            dependencies=dependencies,
+            tasks=(determine_hp_cap_buildings_eGon100RE,),
+        )
+
+
+class BuildingHeatPeakLoads(Base):
+    __tablename__ = "egon_building_heat_peak_loads"
+    __table_args__ = {"schema": "demand"}
+
+    building_id = Column(Integer, primary_key=True)
+    scenario = Column(String, primary_key=True)
+    sector = Column(String, primary_key=True)
+    peak_load_in_w = Column(REAL)
+
+
+def adapt_numpy_float64(numpy_float64):
+    return AsIs(numpy_float64)
+
+
+def adapt_numpy_int64(numpy_int64):
+    return AsIs(numpy_int64)
+
+
+def timeit(func):
+    """
+    Decorator for measuring function's running time.
+    """
+
+    def measure_time(*args, **kw):
+        start_time = time.time()
+        result = func(*args, **kw)
+        print(
+            "Processing time of %s(): %.2f seconds."
+            % (func.__qualname__, time.time() - start_time)
+        )
+        return result
+
+    return measure_time
 
 
 def cascade_per_technology(
@@ -15,7 +266,7 @@
     max_size_individual_chp=0.05,
 ):
 
-    """ Add plants for individual heat.
+    """Add plants for individual heat.
     Currently only on mv grid district level.
 
     Parameters
@@ -215,6 +466,260 @@ def cascade_heat_supply_indiv(scenario, distribution_level, plotting=True):
     )
+
+
+def get_peta_demand(mvgd, scenario):
+    """
+    Retrieve annual peta heat demand for residential buildings for either
+    eGon2035 or eGon100RE scenario.
+
+    Parameters
+    ----------
+    mvgd : int
+        MV grid ID.
+    scenario : str
+        Possible options are eGon2035 or eGon100RE.
+
+    Returns
+    -------
+    df_peta_demand : pd.DataFrame
+        Annual residential heat demand per census cell for the given
+        scenario. Columns of the dataframe are zensus_population_id and
+        demand.
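+
+    Examples
+    --------
+    Minimal usage sketch (requires a database connection; the grid ID is
+    made up)::
+
+        df = get_peta_demand(mvgd=12345, scenario="eGon2035")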
+ + """ + + with db.session_scope() as session: + query = ( + session.query( + MapZensusGridDistricts.zensus_population_id, + EgonPetaHeat.demand, + ) + .filter(MapZensusGridDistricts.bus_id == mvgd) + .filter( + MapZensusGridDistricts.zensus_population_id + == EgonPetaHeat.zensus_population_id + ) + .filter( + EgonPetaHeat.sector == "residential", + EgonPetaHeat.scenario == scenario, + ) + ) + + df_peta_demand = pd.read_sql( + query.statement, query.session.bind, index_col=None + ) + + return df_peta_demand + + +def get_residential_heat_profile_ids(mvgd): + """ + Retrieve 365 daily heat profiles ids per residential building and selected + mvgd. + + Parameters + ---------- + mvgd : int + ID of MVGD + + Returns + ------- + df_profiles_ids : pd.DataFrame + Residential daily heat profile ID's per building. Columns of the dataframe + are zensus_population_id, building_id, selected_idp_profiles, buildings + and day_of_year. + + """ + with db.session_scope() as session: + query = ( + session.query( + MapZensusGridDistricts.zensus_population_id, + EgonHeatTimeseries.building_id, + EgonHeatTimeseries.selected_idp_profiles, + ) + .filter(MapZensusGridDistricts.bus_id == mvgd) + .filter( + MapZensusGridDistricts.zensus_population_id + == EgonHeatTimeseries.zensus_population_id + ) + ) + + df_profiles_ids = pd.read_sql( + query.statement, query.session.bind, index_col=None + ) + # Add building count per cell + df_profiles_ids = pd.merge( + left=df_profiles_ids, + right=df_profiles_ids.groupby("zensus_population_id")["building_id"] + .count() + .rename("buildings"), + left_on="zensus_population_id", + right_index=True, + ) + + # unnest array of ids per building + df_profiles_ids = df_profiles_ids.explode("selected_idp_profiles") + # add day of year column by order of list + df_profiles_ids["day_of_year"] = ( + df_profiles_ids.groupby("building_id").cumcount() + 1 + ) + return df_profiles_ids + + +def get_daily_profiles(profile_ids): + """ + Parameters + ---------- + profile_ids : list(int) + daily heat profile ID's + + Returns + ------- + df_profiles : pd.DataFrame + Residential daily heat profiles. Columns of the dataframe are idp, house, + temperature_class and hour. + + """ + saio.register_schema("demand", db.engine()) + from saio.demand import egon_heat_idp_pool + + with db.session_scope() as session: + query = session.query(egon_heat_idp_pool).filter( + egon_heat_idp_pool.index.in_(profile_ids) + ) + + df_profiles = pd.read_sql( + query.statement, query.session.bind, index_col="index" + ) + + # unnest array of profile values per id + df_profiles = df_profiles.explode("idp") + # Add column for hour of day + df_profiles["hour"] = df_profiles.groupby(axis=0, level=0).cumcount() + 1 + + return df_profiles + + +def get_daily_demand_share(mvgd): + """per census cell + Parameters + ---------- + mvgd : int + MVGD id + + Returns + ------- + df_daily_demand_share : pd.DataFrame + Daily annual demand share per cencus cell. Columns of the dataframe + are zensus_population_id, day_of_year and daily_demand_share. 
+ + """ + + with db.session_scope() as session: + query = session.query( + MapZensusGridDistricts.zensus_population_id, + EgonDailyHeatDemandPerClimateZone.day_of_year, + EgonDailyHeatDemandPerClimateZone.daily_demand_share, + ).filter( + EgonMapZensusClimateZones.climate_zone + == EgonDailyHeatDemandPerClimateZone.climate_zone, + MapZensusGridDistricts.zensus_population_id + == EgonMapZensusClimateZones.zensus_population_id, + MapZensusGridDistricts.bus_id == mvgd, + ) + + df_daily_demand_share = pd.read_sql( + query.statement, query.session.bind, index_col=None + ) + return df_daily_demand_share + + +def calc_residential_heat_profiles_per_mvgd(mvgd, scenario): + """ + Gets residential heat profiles per building in MV grid for either eGon2035 + or eGon100RE scenario. + + Parameters + ---------- + mvgd : int + MV grid ID. + scenario : str + Possible options are eGon2035 or eGon100RE. + + Returns + -------- + pd.DataFrame + Heat demand profiles of buildings. Columns are: + * zensus_population_id : int + Zensus cell ID building is in. + * building_id : int + ID of building. + * day_of_year : int + Day of the year (1 - 365). + * hour : int + Hour of the day (1 - 24). + * demand_ts : float + Building's residential heat demand in MW, for specified hour + of the year (specified through columns `day_of_year` and + `hour`). + """ + + columns = [ + "zensus_population_id", + "building_id", + "day_of_year", + "hour", + "demand_ts", + ] + + df_peta_demand = get_peta_demand(mvgd, scenario) + + # TODO maybe return empty dataframe + if df_peta_demand.empty: + logger.info(f"No demand for MVGD: {mvgd}") + return pd.DataFrame(columns=columns) + + df_profiles_ids = get_residential_heat_profile_ids(mvgd) + + if df_profiles_ids.empty: + logger.info(f"No profiles for MVGD: {mvgd}") + return pd.DataFrame(columns=columns) + + df_profiles = get_daily_profiles( + df_profiles_ids["selected_idp_profiles"].unique() + ) + + df_daily_demand_share = get_daily_demand_share(mvgd) + + # Merge profile ids to peta demand by zensus_population_id + df_profile_merge = pd.merge( + left=df_peta_demand, right=df_profiles_ids, on="zensus_population_id" + ) + + # Merge daily demand to daily profile ids by zensus_population_id and day + df_profile_merge = pd.merge( + left=df_profile_merge, + right=df_daily_demand_share, + on=["zensus_population_id", "day_of_year"], + ) + + # Merge daily profiles by profile id + df_profile_merge = pd.merge( + left=df_profile_merge, + right=df_profiles[["idp", "hour"]], + left_on="selected_idp_profiles", + right_index=True, + ) + + # Scale profiles + df_profile_merge["demand_ts"] = ( + df_profile_merge["idp"] + .mul(df_profile_merge["daily_demand_share"]) + .mul(df_profile_merge["demand"]) + .div(df_profile_merge["buildings"]) + ) + + return df_profile_merge.loc[:, columns] + + def plot_heat_supply(resulting_capacities): from matplotlib import pyplot as plt @@ -246,3 +751,962 @@ def plot_heat_supply(resulting_capacities): }, ) plt.savefig(f"plots/individual_heat_supply_{c}.png", dpi=300) + + +def get_zensus_cells_with_decentral_heat_demand_in_mv_grid( + scenario, mv_grid_id +): + """ + Returns zensus cell IDs with decentral heating systems in given MV grid. + + As cells with district heating differ between scenarios, this is also + depending on the scenario. + + Parameters + ----------- + scenario : str + Name of scenario. Can be either "eGon2035" or "eGon100RE". + mv_grid_id : int + ID of MV grid. 
+ + Returns + -------- + pd.Index(int) + Zensus cell IDs (as int) of buildings with decentral heating systems in + given MV grid. Type is pandas Index to avoid errors later on when it is + used in a query. + + """ + + # get zensus cells in grid + zensus_population_ids = db.select_dataframe( + f""" + SELECT zensus_population_id + FROM boundaries.egon_map_zensus_grid_districts + WHERE bus_id = {mv_grid_id} + """, + index_col=None, + ).zensus_population_id.values + + # maybe use adapter + # convert to pd.Index (otherwise type is np.int64, which will for some + # reason throw an error when used in a query) + zensus_population_ids = pd.Index(zensus_population_ids) + + # get zensus cells with district heating + with db.session_scope() as session: + query = session.query( + MapZensusDistrictHeatingAreas.zensus_population_id, + ).filter( + MapZensusDistrictHeatingAreas.scenario == scenario, + MapZensusDistrictHeatingAreas.zensus_population_id.in_( + zensus_population_ids + ), + ) + + cells_with_dh = pd.read_sql( + query.statement, query.session.bind, index_col=None + ).zensus_population_id.values + + # remove zensus cells with district heating + zensus_population_ids = zensus_population_ids.drop( + cells_with_dh, errors="ignore" + ) + return pd.Index(zensus_population_ids) + + +def get_residential_buildings_with_decentral_heat_demand_in_mv_grid( + scenario, mv_grid_id +): + """ + Returns building IDs of buildings with decentral residential heat demand in + given MV grid. + + As cells with district heating differ between scenarios, this is also + depending on the scenario. + + Parameters + ----------- + scenario : str + Name of scenario. Can be either "eGon2035" or "eGon100RE". + mv_grid_id : int + ID of MV grid. + + Returns + -------- + pd.Index(int) + Building IDs (as int) of buildings with decentral heating system in + given MV grid. Type is pandas Index to avoid errors later on when it is + used in a query. + + """ + # get zensus cells with decentral heating + zensus_population_ids = ( + get_zensus_cells_with_decentral_heat_demand_in_mv_grid( + scenario, mv_grid_id + ) + ) + + # get buildings with decentral heat demand + saio.register_schema("demand", engine) + from saio.demand import egon_heat_timeseries_selected_profiles + + with db.session_scope() as session: + query = session.query( + egon_heat_timeseries_selected_profiles.building_id, + ).filter( + egon_heat_timeseries_selected_profiles.zensus_population_id.in_( + zensus_population_ids + ) + ) + + buildings_with_heat_demand = pd.read_sql( + query.statement, query.session.bind, index_col=None + ).building_id.values + + return pd.Index(buildings_with_heat_demand) + + +def get_cts_buildings_with_decentral_heat_demand_in_mv_grid( + scenario, mv_grid_id +): + """ + Returns building IDs of buildings with decentral CTS heat demand in + given MV grid. + + As cells with district heating differ between scenarios, this is also + depending on the scenario. + + Parameters + ----------- + scenario : str + Name of scenario. Can be either "eGon2035" or "eGon100RE". + mv_grid_id : int + ID of MV grid. + + Returns + -------- + pd.Index(int) + Building IDs (as int) of buildings with decentral heating system in + given MV grid. Type is pandas Index to avoid errors later on when it is + used in a query. 
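+
+    Examples
+    --------
+    Minimal usage sketch (requires a database connection; the grid ID is
+    made up)::
+
+        ids = get_cts_buildings_with_decentral_heat_demand_in_mv_grid(
+            scenario="eGon2035", mv_grid_id=12345
+        )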
+ + """ + + # get zensus cells with decentral heating + zensus_population_ids = ( + get_zensus_cells_with_decentral_heat_demand_in_mv_grid( + scenario, mv_grid_id + ) + ) + + # get buildings with decentral heat demand + with db.session_scope() as session: + query = session.query(EgonMapZensusMvgdBuildings.building_id).filter( + EgonMapZensusMvgdBuildings.sector == "cts", + EgonMapZensusMvgdBuildings.zensus_population_id.in_( + zensus_population_ids + ), + ) + + buildings_with_heat_demand = pd.read_sql( + query.statement, query.session.bind, index_col=None + ).building_id.values + + return pd.Index(buildings_with_heat_demand) + + +def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario): + """ + Returns building IDs of buildings with decentral heat demand in + given MV grid. + + As cells with district heating differ between scenarios, this is also + depending on the scenario. + + Parameters + ----------- + mv_grid_id : int + ID of MV grid. + scenario : str + Name of scenario. Can be either "eGon2035" or "eGon100RE". + + Returns + -------- + pd.Index(int) + Building IDs (as int) of buildings with decentral heating system in given + MV grid. Type is pandas Index to avoid errors later on when it is + used in a query. + + """ + # get residential buildings with decentral heating systems + buildings_decentral_heating_res = ( + get_residential_buildings_with_decentral_heat_demand_in_mv_grid( + scenario, mvgd + ) + ) + + # get CTS buildings with decentral heating systems + buildings_decentral_heating_cts = ( + get_cts_buildings_with_decentral_heat_demand_in_mv_grid(scenario, mvgd) + ) + + # merge residential and CTS buildings + buildings_decentral_heating = buildings_decentral_heating_res.append( + buildings_decentral_heating_cts + ).unique() + + return buildings_decentral_heating + + +def get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id): + """ + Returns total heat pump capacity per grid that was previously defined + (by NEP or pypsa-eur-sec). + + Parameters + ----------- + scenario : str + Name of scenario. Can be either "eGon2035" or "eGon100RE". + mv_grid_id : int + ID of MV grid. + + Returns + -------- + float + Total heat pump capacity in MW in given MV grid. 
+ + """ + from egon.data.datasets.heat_supply import EgonIndividualHeatingSupply + + with db.session_scope() as session: + query = ( + session.query( + EgonIndividualHeatingSupply.mv_grid_id, + EgonIndividualHeatingSupply.capacity, + ) + .filter(EgonIndividualHeatingSupply.scenario == scenario) + .filter(EgonIndividualHeatingSupply.carrier == "heat_pump") + .filter(EgonIndividualHeatingSupply.mv_grid_id == mv_grid_id) + ) + + hp_cap_mv_grid = pd.read_sql( + query.statement, query.session.bind, index_col="mv_grid_id" + ) + if hp_cap_mv_grid.empty: + return 0.0 + else: + return hp_cap_mv_grid.capacity.values[0] + + +def get_heat_peak_demand_per_building(scenario, building_ids): + """""" + + with db.session_scope() as session: + query = ( + session.query( + BuildingHeatPeakLoads.building_id, + BuildingHeatPeakLoads.peak_load_in_w, + ).filter(BuildingHeatPeakLoads.scenario == scenario) + # .filter(BuildingHeatPeakLoads.sector == "both") + .filter(BuildingHeatPeakLoads.building_id.in_(building_ids)) + ) + + df_heat_peak_demand = pd.read_sql( + query.statement, query.session.bind, index_col=None + ) + + # TODO remove check + if df_heat_peak_demand.duplicated("building_id").any(): + raise ValueError("Duplicate building_id") + + # convert to series and from W to MW + df_heat_peak_demand = ( + df_heat_peak_demand.set_index("building_id").loc[:, "peak_load_in_w"] + * 1e6 + ) + return df_heat_peak_demand + + +def determine_minimum_hp_capacity_per_building( + peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 +): + """ + Determines minimum required heat pump capacity. + + Parameters + ---------- + peak_heat_demand : pd.Series + Series with peak heat demand per building in MW. Index contains the + building ID. + flexibility_factor : float + Factor to overdimension the heat pump to allow for some flexible + dispatch in times of high heat demand. Per default, a factor of 24/18 + is used, to take into account + + Returns + ------- + pd.Series + Pandas series with minimum required heat pump capacity per building in + MW. + + """ + return peak_heat_demand * flexibility_factor / cop + + +def determine_buildings_with_hp_in_mv_grid( + hp_cap_mv_grid, min_hp_cap_per_building +): + """ + Distributes given total heat pump capacity to buildings based on their peak + heat demand. + + Parameters + ----------- + hp_cap_mv_grid : float + Total heat pump capacity in MW in given MV grid. + min_hp_cap_per_building : pd.Series + Pandas series with minimum required heat pump capacity per building + in MW. + + Returns + ------- + pd.Index(int) + Building IDs (as int) of buildings to get heat demand time series for. 
+ + """ + building_ids = min_hp_cap_per_building.index + + # get buildings with PV to give them a higher priority when selecting + # buildings a heat pump will be allocated to + saio.register_schema("supply", engine) + from saio.supply import egon_power_plants_pv_roof_building + + with db.session_scope() as session: + query = session.query( + egon_power_plants_pv_roof_building.building_id + ).filter( + egon_power_plants_pv_roof_building.building_id.in_(building_ids) + ) + + buildings_with_pv = pd.read_sql( + query.statement, query.session.bind, index_col=None + ).building_id.values + # set different weights for buildings with PV and without PV + weight_with_pv = 1.5 + weight_without_pv = 1.0 + weights = pd.concat( + [ + pd.DataFrame( + {"weight": weight_without_pv}, + index=building_ids.drop(buildings_with_pv, errors="ignore"), + ), + pd.DataFrame({"weight": weight_with_pv}, index=buildings_with_pv), + ] + ) + # normalise weights (probability needs to add up to 1) + weights.weight = weights.weight / weights.weight.sum() + + # get random order at which buildings are chosen + np.random.seed(db.credentials()["--random-seed"]) + buildings_with_hp_order = np.random.choice( + weights.index, + size=len(weights), + replace=False, + p=weights.weight.values, + ) + + # select buildings until HP capacity in MV grid is reached (some rest + # capacity will remain) + hp_cumsum = min_hp_cap_per_building.loc[buildings_with_hp_order].cumsum() + buildings_with_hp = hp_cumsum[hp_cumsum <= hp_cap_mv_grid].index + + # choose random heat pumps until remaining heat pumps are larger than + # remaining heat pump capacity + remaining_hp_cap = ( + hp_cap_mv_grid - min_hp_cap_per_building.loc[buildings_with_hp].sum() + ) + min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ + building_ids.drop(buildings_with_hp) + ] + possible_buildings = min_cap_buildings_wo_hp[ + min_cap_buildings_wo_hp <= remaining_hp_cap + ].index + while len(possible_buildings) > 0: + random.seed(db.credentials()["--random-seed"]) + new_hp_building = random.choice(possible_buildings) + # add new building to building with HP + buildings_with_hp = buildings_with_hp.append( + pd.Index([new_hp_building]) + ) + # determine if there are still possible buildings + remaining_hp_cap = ( + hp_cap_mv_grid + - min_hp_cap_per_building.loc[buildings_with_hp].sum() + ) + min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ + building_ids.drop(buildings_with_hp) + ] + possible_buildings = min_cap_buildings_wo_hp[ + min_cap_buildings_wo_hp <= remaining_hp_cap + ].index + + return buildings_with_hp + + +def desaggregate_hp_capacity(min_hp_cap_per_building, hp_cap_mv_grid): + """ + Desaggregates the required total heat pump capacity to buildings. + + All buildings are previously assigned a minimum required heat pump + capacity. If the total heat pump capacity exceeds this, larger heat pumps + are assigned. + + Parameters + ------------ + min_hp_cap_per_building : pd.Series + Pandas series with minimum required heat pump capacity per building + in MW. + hp_cap_mv_grid : float + Total heat pump capacity in MW in given MV grid. + + Returns + -------- + pd.Series + Pandas series with heat pump capacity per building in MW. 
+ + """ + # distribute remaining capacity to all buildings with HP depending on + # installed HP capacity + + allocated_cap = min_hp_cap_per_building.sum() + remaining_cap = hp_cap_mv_grid - allocated_cap + + fac = remaining_cap / allocated_cap + hp_cap_per_building = ( + min_hp_cap_per_building * fac + min_hp_cap_per_building + ) + hp_cap_per_building.index.name = "building_id" + + return hp_cap_per_building + + +def determine_min_hp_cap_buildings_pypsa_eur_sec( + peak_heat_demand, building_ids +): + """ + Determines minimum required HP capacity in MV grid in MW as input for + pypsa-eur-sec. + + Parameters + ---------- + peak_heat_demand : pd.Series + Series with peak heat demand per building in MW. Index contains the + building ID. + building_ids : pd.Index(int) + Building IDs (as int) of buildings with decentral heating system in + given MV grid. + + Returns + -------- + float + Minimum required HP capacity in MV grid in MW. + + """ + if len(building_ids) > 0: + peak_heat_demand = peak_heat_demand.loc[building_ids] + # determine minimum required heat pump capacity per building + min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( + peak_heat_demand + ) + return min_hp_cap_buildings.sum() + else: + return 0.0 + + +def determine_hp_cap_buildings_eGon2035_per_mvgd( + mv_grid_id, peak_heat_demand, building_ids +): + """ + Determines which buildings in the MV grid will have a HP (buildings with PV + rooftop are more likely to be assigned) in the eGon2035 scenario, as well + as their respective HP capacity in MW. + + Parameters + ----------- + mv_grid_id : int + ID of MV grid. + peak_heat_demand : pd.Series + Series with peak heat demand per building in MW. Index contains the + building ID. + building_ids : pd.Index(int) + Building IDs (as int) of buildings with decentral heating system in + given MV grid. + + """ + + hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid( + "eGon2035", mv_grid_id + ) + + if len(building_ids) > 0 and hp_cap_grid > 0.0: + peak_heat_demand = peak_heat_demand.loc[building_ids] + + # determine minimum required heat pump capacity per building + min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( + peak_heat_demand + ) + + # select buildings that will have a heat pump + buildings_with_hp = determine_buildings_with_hp_in_mv_grid( + hp_cap_grid, min_hp_cap_buildings + ) + + # distribute total heat pump capacity to all buildings with HP + hp_cap_per_building = desaggregate_hp_capacity( + min_hp_cap_buildings.loc[buildings_with_hp], hp_cap_grid + ) + + return hp_cap_per_building.rename("hp_capacity") + + else: + return pd.Series(dtype="float64").rename("hp_capacity") + + +def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id): + """ + Determines HP capacity per building in eGon100RE scenario. + + In eGon100RE scenario all buildings without district heating get a heat + pump. + + Returns + -------- + pd.Series + Pandas series with heat pump capacity per building in MW. 
+ + """ + + hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid( + "eGon100RE", mv_grid_id + ) + + if hp_cap_grid > 0.0: + + # get buildings with decentral heating systems + building_ids = get_buildings_with_decentral_heat_demand_in_mv_grid( + mv_grid_id, scenario="eGon100RE" + ) + + # TODO get peak demand from db + df_peak_heat_demand = get_heat_peak_demand_per_building( + "eGon100RE", building_ids + ) + + # determine minimum required heat pump capacity per building + min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( + df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 + ) + + # distribute total heat pump capacity to all buildings with HP + hp_cap_per_building = desaggregate_hp_capacity( + min_hp_cap_buildings, hp_cap_grid + ) + + return hp_cap_per_building.rename("hp_capacity") + else: + return pd.Series(dtype="float64").rename("hp_capacity") + + +def determine_hp_cap_buildings_eGon100RE(): + """ + Main function to determine HP capacity per building in eGon100RE scenario. + + """ + + # ========== Register np datatypes with SQLA ========== + register_adapter(np.float64, adapt_numpy_float64) + register_adapter(np.int64, adapt_numpy_int64) + # ===================================================== + + with db.session_scope() as session: + query = ( + session.query( + MapZensusGridDistricts.bus_id, + ) + .filter( + MapZensusGridDistricts.zensus_population_id + == EgonPetaHeat.zensus_population_id + ) + .distinct(MapZensusGridDistricts.bus_id) + ) + mvgd_ids = pd.read_sql(query.statement, query.session.bind, index_col=None) + mvgd_ids = mvgd_ids.sort_values("bus_id") + + df_hp_cap_per_building_100RE_db = pd.DataFrame( + columns=["building_id", "hp_capacity"] + ) + + for mvgd_id in mvgd_ids["bus_id"].values: + + hp_cap_per_building_100RE = ( + determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id) + ) + + if not hp_cap_per_building_100RE.empty: + df_hp_cap_per_building_100RE_db = pd.concat( + [ + df_hp_cap_per_building_100RE_db, + hp_cap_per_building_100RE.reset_index(), + ], + axis=0, + ) + + df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE" + + write_table_to_postgres( + df_hp_cap_per_building_100RE_db, + EgonHpCapacityBuildings, + engine=engine, + drop=False, + ) + + +def aggregate_residential_and_cts_profiles(mvgd, scenario): + """ + Gets residential and CTS heat demand profiles per building and aggregates them. + + Parameters + ---------- + mvgd : int + MV grid ID. + scenario : str + Possible options are eGon2035 or eGon100RE. + + Returns + -------- + pd.DataFrame + Table of demand profile per building. Column names are building IDs and index + is hour of the year as int (0-8759). 
+ + """ + # ############### get residential heat demand profiles ############### + df_heat_ts = calc_residential_heat_profiles_per_mvgd( + mvgd=mvgd, scenario=scenario + ) + + # pivot to allow aggregation with CTS profiles + df_heat_ts = df_heat_ts.pivot( + index=["day_of_year", "hour"], + columns="building_id", + values="demand_ts", + ) + df_heat_ts = df_heat_ts.sort_index().reset_index(drop=True) + + # ############### get CTS heat demand profiles ############### + heat_demand_cts_ts = calc_cts_building_profiles( + bus_ids=[mvgd], + scenario=scenario, + sector="heat", + ) + + # ############# aggregate residential and CTS demand profiles ############# + df_heat_ts = pd.concat([df_heat_ts, heat_demand_cts_ts], axis=1) + + df_heat_ts = df_heat_ts.groupby(axis=1, level=0).sum() + + return df_heat_ts + + +def export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db): + """""" + + df_peak_loads_db = df_peak_loads_db.melt( + id_vars="building_id", + var_name="scenario", + value_name="peak_load_in_w", + ) + df_peak_loads_db["sector"] = "residential+cts" + # From MW to W + df_peak_loads_db["peak_load_in_w"] = ( + df_peak_loads_db["peak_load_in_w"] * 1e6 + ) + write_table_to_postgres( + df_peak_loads_db, BuildingHeatPeakLoads, engine=engine + ) + + columns = { + column.key: column.type + for column in EgonEtragoTimeseriesIndividualHeating.__table__.columns + } + df_heat_mvgd_ts_db = df_heat_mvgd_ts_db.loc[:, columns.keys()] + + df_heat_mvgd_ts_db.to_sql( + name=EgonEtragoTimeseriesIndividualHeating.__table__.name, + schema=EgonEtragoTimeseriesIndividualHeating.__table__.schema, + con=engine, + if_exists="append", + method="multi", + index=False, + dtype=columns, + ) + + +def export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec): + + df_hp_min_cap_mv_grid_pypsa_eur_sec.index.name = "mvgd_id" + df_hp_min_cap_mv_grid_pypsa_eur_sec = ( + df_hp_min_cap_mv_grid_pypsa_eur_sec.to_frame( + name="min_hp_capacity" + ).reset_index() + ) + + folder = Path(".") / "input-pypsa-eur-sec" + file = folder / "minimum_hp_capacity_mv_grid_2035.csv" + # Create the folder, if it does not exist already + if not os.path.exists(folder): + os.mkdir(folder) + # TODO check append + if not file.is_file(): + df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(file) + # TODO outsource into separate task incl delete file if clearing + else: + df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv( + file, mode="a", header=False + ) + + +def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids): + """ + Main function to determine HP capacity per building in eGon2035 scenario. + Further, creates heat demand time series for all buildings with heat pumps + in MV grid, as well as for all buildings with gas boilers, used in eTraGo. + + Parameters + ----------- + mvgd_ids : list(int) + List of MV grid IDs to determine data for. 
+ + """ + + # ========== Register np datatypes with SQLA ========== + register_adapter(np.float64, adapt_numpy_float64) + register_adapter(np.int64, adapt_numpy_int64) + # ===================================================== + + df_peak_loads_db = pd.DataFrame() + df_hp_cap_per_building_2035_db = pd.DataFrame() + df_heat_mvgd_ts_db = pd.DataFrame() + + for mvgd in mvgd_ids: + + logger.debug(f"MVGD={mvgd} | Start") + + # ############# aggregate residential and CTS demand profiles ##### + + df_heat_ts = aggregate_residential_and_cts_profiles( + mvgd, scenario="eGon2035" + ) + + # ##################### determine peak loads ################### + logger.debug(f"MVGD={mvgd} | Determine peak loads.") + + peak_load = df_heat_ts.max().rename("eGon2035") + + # ######## determine HP capacity per building ######### + logger.debug(f"MVGD={mvgd} | Determine HP capacities.") + + buildings_decentral_heating = ( + get_buildings_with_decentral_heat_demand_in_mv_grid( + mvgd, scenario="eGon2035" + ) + ) + hp_cap_per_building_2035 = ( + determine_hp_cap_buildings_eGon2035_per_mvgd( + mvgd, + peak_load, + buildings_decentral_heating, + ) + ) + buildings_gas_2035 = pd.Index(buildings_decentral_heating).drop( + hp_cap_per_building_2035.index + ) + + # ################ aggregated heat profiles ################### + logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.") + + df_mvgd_ts_2035_hp = df_heat_ts.loc[ + :, + hp_cap_per_building_2035.index, + ].sum(axis=1) + + # heat demand time series for buildings with gas boiler + df_mvgd_ts_2035_gas = df_heat_ts.loc[:, buildings_gas_2035].sum(axis=1) + + df_heat_mvgd_ts = pd.DataFrame( + data={ + "carrier": ["heat_pump", "CH4"], + "bus_id": mvgd, + "scenario": ["eGon2035", "eGon2035"], + "dist_aggregated_mw": [ + df_mvgd_ts_2035_hp.to_list(), + df_mvgd_ts_2035_gas.to_list(), + ], + } + ) + + # ################ collect results ################## + logger.debug(f"MVGD={mvgd} | Collect results.") + + df_peak_loads_db = pd.concat( + [df_peak_loads_db, peak_load.reset_index()], + axis=0, + ignore_index=True, + ) + + df_heat_mvgd_ts_db = pd.concat( + [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True + ) + + df_hp_cap_per_building_2035_db = pd.concat( + [ + df_hp_cap_per_building_2035_db, + hp_cap_per_building_2035.reset_index(), + ], + axis=0, + ) + + # ################ export to db ####################### + logger.debug(" Write data to db.") + export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db) + + df_hp_cap_per_building_2035_db["scenario"] = "eGon2035" + write_table_to_postgres( + df_hp_cap_per_building_2035_db, + EgonHpCapacityBuildings, + engine=engine, + drop=False, + ) + + +def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids): + """ + Main function to determine minimum required HP capacity in MV for pypsa-eur-sec. + Further, creates heat demand time series for all buildings with heat pumps in MV + grid in eGon100RE scenario, used in eTraGo. + + Parameters + ----------- + mvgd_ids : list(int) + List of MV grid IDs to determine data for. 
+ + """ + + # ========== Register np datatypes with SQLA ========== + register_adapter(np.float64, adapt_numpy_float64) + register_adapter(np.int64, adapt_numpy_int64) + # ===================================================== + + df_peak_loads_db = pd.DataFrame() + df_heat_mvgd_ts_db = pd.DataFrame() + df_hp_min_cap_mv_grid_pypsa_eur_sec = pd.Series(dtype="float64") + + for mvgd in mvgd_ids: + + logger.debug(f"MVGD={mvgd} | Start") + + # ############# aggregate residential and CTS demand profiles ##### + + df_heat_ts = aggregate_residential_and_cts_profiles( + mvgd, scenario="eGon100RE" + ) + + # ##################### determine peak loads ################### + logger.debug(f"MVGD={mvgd} | Determine peak loads.") + + peak_load_100RE = df_heat_ts.max().rename("eGon100RE") + + # ######## determine minimum HP capacity pypsa-eur-sec ########### + buildings_decentral_heating = ( + get_buildings_with_decentral_heat_demand_in_mv_grid( + mvgd, scenario="eGon100RE" + ) + ) + hp_min_cap_mv_grid_pypsa_eur_sec = ( + determine_min_hp_cap_buildings_pypsa_eur_sec( + peak_load_100RE, + buildings_decentral_heating, + ) + ) + + # ################ aggregated heat profiles ################### + logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.") + + df_mvgd_ts_hp = df_heat_ts.loc[ + :, + buildings_decentral_heating, + ].sum(axis=1) + + df_heat_mvgd_ts = pd.DataFrame( + data={ + "carrier": "heat_pump", + "bus_id": mvgd, + "scenario": "eGon100RE", + "dist_aggregated_mw": [df_mvgd_ts_hp.to_list()], + } + ) + + # ################ collect results ################## + logger.debug(f"MVGD={mvgd} | Collect results.") + + df_peak_loads_db = pd.concat( + [df_peak_loads_db, peak_load_100RE.reset_index()], + axis=0, + ignore_index=True, + ) + + df_heat_mvgd_ts_db = pd.concat( + [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True + ) + + df_hp_min_cap_mv_grid_pypsa_eur_sec.loc[ + mvgd + ] = hp_min_cap_mv_grid_pypsa_eur_sec + + # ################ export to db and csv ###################### + logger.debug(" Write data to db.") + export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db) + + logger.debug("Write pypsa-eur-sec min HP capacities to csv.") + export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec) + + +def create_peak_load_table(): + + BuildingHeatPeakLoads.__table__.drop(bind=engine, checkfirst=True) + BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) + + +def create_hp_capacity_table(): + + EgonHpCapacityBuildings.__table__.drop(bind=engine, checkfirst=True) + EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) + + +def create_egon_etrago_timeseries_individual_heating(): + + EgonEtragoTimeseriesIndividualHeating.__table__.drop( + bind=engine, checkfirst=True + ) + EgonEtragoTimeseriesIndividualHeating.__table__.create( + bind=engine, checkfirst=True + ) + + +def delete_peak_loads_if_existing(): + """Remove all entries""" + with db.session_scope() as session: + # Buses + session.query(BuildingHeatPeakLoads).filter( + BuildingHeatPeakLoads.scenario == "eGon2035" + ).delete(synchronize_session=False)