Commit
Add logging
nailend committed Oct 12, 2022
1 parent bb28eae commit 1097566
Showing 1 changed file with 48 additions and 33 deletions.
81 changes: 48 additions & 33 deletions src/egon/data/datasets/heat_supply/individual_heating.py
@@ -95,7 +95,7 @@ def dyn_parallel_tasks():
op_kwargs={
"n": i,
"max_n": parallel_tasks,
"func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec,
"func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec, # noqa: E501
},
)
)
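The `# noqa: E501` added above only silences flake8's line-length warning; the interesting part is the kwargs, which feed the bulk-splitting helper fixed at the bottom of this diff. A minimal sketch of how such parallel tasks are typically generated — assuming Airflow's `PythonOperator`; the task id and `parallel_tasks` value here are illustrative, not taken from this module:

```python
# Sketch under assumptions: Airflow 2.x import path; task ids and the
# parallel_tasks value are hypothetical.
from airflow.operators.python import PythonOperator

parallel_tasks = 4  # hypothetical; configured elsewhere in the dataset

tasks = [
    PythonOperator(
        task_id=f"determine-hp-capacity-pypsa-eur-sec_{i}",  # hypothetical id
        python_callable=split_mvgds_into_bulks,
        op_kwargs={
            "n": i,
            "max_n": parallel_tasks,
            "func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec,  # noqa: E501
        },
    )
    for i in range(parallel_tasks)
]
```

Each task then processes bulk `n` of `max_n`, so the MV grid districts are worked off in parallel.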
@@ -431,8 +431,8 @@ def get_peta_demand(mvgd, scenario):
Returns
-------
df_peta_demand : pd.DataFrame
- Annual residential heat demand per building and scenario. Columns of the
- dataframe are zensus_population_id and demand.
+ Annual residential heat demand per building and scenario. Columns of
+ the dataframe are zensus_population_id and demand.
"""

@@ -473,9 +473,9 @@ def get_residential_heat_profile_ids(mvgd):
Returns
-------
df_profiles_ids : pd.DataFrame
- Residential daily heat profile ID's per building. Columns of the dataframe
- are zensus_population_id, building_id, selected_idp_profiles, buildings
- and day_of_year.
+ Residential daily heat profile ID's per building. Columns of the
+ dataframe are zensus_population_id, building_id,
+ selected_idp_profiles, buildings and day_of_year.
"""
with db.session_scope() as session:
@@ -524,8 +524,8 @@ def get_daily_profiles(profile_ids):
Returns
-------
df_profiles : pd.DataFrame
- Residential daily heat profiles. Columns of the dataframe are idp, house,
- temperature_class and hour.
+ Residential daily heat profiles. Columns of the dataframe are idp,
+ house, temperature_class and hour.
"""
saio.register_schema("demand", db.engine())
@@ -884,8 +884,8 @@ def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario):
Returns
--------
pd.Index(int)
- Building IDs (as int) of buildings with decentral heating system in given
- MV grid. Type is pandas Index to avoid errors later on when it is
+ Building IDs (as int) of buildings with decentral heating system in
+ given MV grid. Type is pandas Index to avoid errors later on when it is
used in a query.
"""
@@ -957,8 +957,8 @@ def get_heat_peak_demand_per_building(scenario, building_ids):
session.query(
BuildingHeatPeakLoads.building_id,
BuildingHeatPeakLoads.peak_load_in_w,
- ).filter(BuildingHeatPeakLoads.scenario == scenario)
- # .filter(BuildingHeatPeakLoads.sector == "both")
+ )
+ .filter(BuildingHeatPeakLoads.scenario == scenario)
.filter(BuildingHeatPeakLoads.building_id.in_(building_ids))
)

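This hunk drops the dead, commented-out sector filter and reflows the query so each chained `.filter()` sits on its own line; every `.filter()` call returns a new Query, so both conditions apply to the result. The full pattern, assembled from the hunk's context plus added lines (assumes an open SQLAlchemy session and this module's `BuildingHeatPeakLoads` ORM class; the `read_sql` call mirrors the pattern used further down in this file):

```python
query = (
    session.query(
        BuildingHeatPeakLoads.building_id,
        BuildingHeatPeakLoads.peak_load_in_w,
    )
    .filter(BuildingHeatPeakLoads.scenario == scenario)
    .filter(BuildingHeatPeakLoads.building_id.in_(building_ids))
)
# Materialize the two-column result as a dataframe.
df_peak_loads = pd.read_sql(query.statement, query.session.bind)
```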
@@ -1248,16 +1248,18 @@ def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id):
mv_grid_id, scenario="eGon100RE"
)

- # TODO get peak demand from db
+ logger.info(f"MVGD={mv_grid_id} | Get peak loads from DB")
df_peak_heat_demand = get_heat_peak_demand_per_building(
"eGon100RE", building_ids
)

+ logger.info(f"MVGD={mv_grid_id} | Determine HP capacities.")
# determine minimum required heat pump capacity per building
min_hp_cap_buildings = determine_minimum_hp_capacity_per_building(
df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
)

+ logger.info(f"MVGD={mv_grid_id} | Desaggregate HP capacities.")
# distribute total heat pump capacity to all buildings with HP
hp_cap_per_building = desaggregate_hp_capacity(
min_hp_cap_buildings, hp_cap_grid
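For reference, `determine_minimum_hp_capacity_per_building` is not part of this diff; its arguments suggest a sizing rule along these lines (a hedged sketch, not necessarily the module's verbatim implementation):

```python
# Hedged sketch: the heat pump is sized to cover the building's peak heat
# demand while running only ~18 of 24 hours (flexibility_factor = 24 / 18),
# with the COP converting heat demand into required capacity.
def determine_minimum_hp_capacity_per_building(
    peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
):
    return peak_heat_demand * flexibility_factor / cop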
@@ -1292,12 +1294,15 @@ def determine_hp_cap_buildings_eGon100RE():
)
mvgd_ids = pd.read_sql(query.statement, query.session.bind, index_col=None)
mvgd_ids = mvgd_ids.sort_values("bus_id")
+ mvgd_ids = mvgd_ids["bus_id"].values

df_hp_cap_per_building_100RE_db = pd.DataFrame(
columns=["building_id", "hp_capacity"]
)

- for mvgd_id in mvgd_ids["bus_id"].values:
+ for mvgd_id in mvgd_ids:
+
+ logger.info(f"MVGD={mvgd_id} | Start")

hp_cap_per_building_100RE = (
determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id)
@@ -1312,6 +1317,7 @@ def determine_hp_cap_buildings_eGon100RE():
axis=0,
)

+ logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE"

write_table_to_postgres(
@@ -1324,7 +1330,8 @@

def aggregate_residential_and_cts_profiles(mvgd, scenario):
"""
- Gets residential and CTS heat demand profiles per building and aggregates them.
+ Gets residential and CTS heat demand profiles per building and aggregates
+ them.
Parameters
----------
@@ -1336,8 +1343,8 @@ def aggregate_residential_and_cts_profiles(mvgd, scenario):
Returns
--------
pd.DataFrame
- Table of demand profile per building. Column names are building IDs and index
- is hour of the year as int (0-8759).
+ Table of demand profile per building. Column names are building IDs and
+ index is hour of the year as int (0-8759).
"""
# ############### get residential heat demand profiles ###############
Expand Down Expand Up @@ -1450,7 +1457,7 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):

for mvgd in mvgd_ids:

- logger.debug(f"MVGD={mvgd} | Start")
+ logger.info(f"MVGD={mvgd} | Start")

# ############# aggregate residential and CTS demand profiles #####

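The debug-to-info promotions in this and the following hunks assume the usual module-level logger; at Airflow's default task log level (INFO), `logger.debug` output is suppressed, which is presumably why these per-MVGD progress messages were promoted. A sketch of that assumption (not shown in this diff):

```python
import logging

# Assumption: the module defines its logger in the standard way, so INFO
# messages surface in the default worker logs while DEBUG ones do not.
logger = logging.getLogger(__name__)
```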
@@ -1459,12 +1466,12 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
)

# ##################### determine peak loads ###################
- logger.debug(f"MVGD={mvgd} | Determine peak loads.")
+ logger.info(f"MVGD={mvgd} | Determine peak loads.")

peak_load = df_heat_ts.max().rename("eGon2035")

# ######## determine HP capacity per building #########
- logger.debug(f"MVGD={mvgd} | Determine HP capacities.")
+ logger.info(f"MVGD={mvgd} | Determine HP capacities.")

buildings_decentral_heating = (
get_buildings_with_decentral_heat_demand_in_mv_grid(
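The peak-load step above is a plain column-wise max: `df_heat_ts` has one column per building and one row per hour of the year, so `.max()` returns each building's annual peak, and `.rename` tags the resulting Series with the scenario name. A toy example (building ids are made up):

```python
import pandas as pd

# Toy stand-in for the 8760-hour df_heat_ts: 3 hours, 2 buildings.
df_heat_ts = pd.DataFrame({101: [1.0, 4.0, 2.0], 102: [3.0, 1.0, 5.0]})
peak_load = df_heat_ts.max().rename("eGon2035")
# peak_load -> 101: 4.0, 102: 5.0 (Series named "eGon2035")
```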
@@ -1483,7 +1490,7 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
)

# ################ aggregated heat profiles ###################
- logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.")
+ logger.info(f"MVGD={mvgd} | Aggregate heat profiles.")

df_mvgd_ts_2035_hp = df_heat_ts.loc[
:,
@@ -1506,7 +1513,7 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
)

# ################ collect results ##################
- logger.debug(f"MVGD={mvgd} | Collect results.")
+ logger.info(f"MVGD={mvgd} | Collect results.")

df_peak_loads_db = pd.concat(
[df_peak_loads_db, peak_load.reset_index()],
@@ -1527,7 +1534,7 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
)

# ################ export to db #######################
- logger.debug(" Write data to db.")
+ logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

df_hp_cap_per_building_2035_db["scenario"] = "eGon2035"
@@ -1541,9 +1548,9 @@ def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):

def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
"""
- Main function to determine minimum required HP capacity in MV for pypsa-eur-sec.
- Further, creates heat demand time series for all buildings with heat pumps in MV
- grid in eGon100RE scenario, used in eTraGo.
+ Main function to determine minimum required HP capacity in MV for
+ pypsa-eur-sec. Further, creates heat demand time series for all buildings
+ with heat pumps in MV grid in eGon100RE scenario, used in eTraGo.
Parameters
-----------
@@ -1563,7 +1570,7 @@ def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):

for mvgd in mvgd_ids:

- logger.debug(f"MVGD={mvgd} | Start")
+ logger.info(f"MVGD={mvgd} | Start")

# ############# aggregate residential and CTS demand profiles #####

@@ -1572,7 +1579,7 @@ def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
)

# ##################### determine peak loads ###################
- logger.debug(f"MVGD={mvgd} | Determine peak loads.")
+ logger.info(f"MVGD={mvgd} | Determine peak loads.")

peak_load_100RE = df_heat_ts.max().rename("eGon100RE")

@@ -1590,7 +1597,7 @@ def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
)

# ################ aggregated heat profiles ###################
- logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.")
+ logger.info(f"MVGD={mvgd} | Aggregate heat profiles.")

df_mvgd_ts_hp = df_heat_ts.loc[
:,
@@ -1607,7 +1614,7 @@ def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
)

# ################ collect results ##################
- logger.debug(f"MVGD={mvgd} | Collect results.")
+ logger.info(f"MVGD={mvgd} | Collect results.")

df_peak_loads_db = pd.concat(
[df_peak_loads_db, peak_load_100RE.reset_index()],
@@ -1624,10 +1631,14 @@ def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
] = hp_min_cap_mv_grid_pypsa_eur_sec

# ################ export to db and csv ######################
- logger.debug(" Write data to db.")
+ logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

- logger.debug("Write pypsa-eur-sec min HP capacities to csv.")
+ logger.info(
+     f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write "
+     f"pypsa-eur-sec min "
+     f"HP capacities to csv."
+ )
export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec)


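A side note on the multi-line message above: adjacent f-string literals inside the parentheses are concatenated at compile time, so the call logs a single line despite being split to satisfy the 79-character limit. A self-contained demonstration with illustrative ids:

```python
mvgd_ids = [1, 99]  # illustrative grid ids
msg = (
    f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write "
    f"pypsa-eur-sec min "
    f"HP capacities to csv."
)
# The three literals collapse into one string:
assert msg == "MVGD=1 - 99 | Write pypsa-eur-sec min HP capacities to csv."
```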
@@ -1649,7 +1660,11 @@ def split_mvgds_into_bulks(n, max_n, func):

mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)

- mvgd_ids = np.array_split(mvgd_ids["bus_id"].values, parallel_tasks)
+ mvgd_ids = np.array_split(mvgd_ids["bus_id"].values, max_n)
+ # Only take split n
+ mvgd_ids = mvgd_ids[n]
+
+ logger.info(f"Bulk takes care of MVGD: {min(mvgd_ids)} - {max(mvgd_ids)}")
func(mvgd_ids)
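This last hunk is the functional fix of the commit: the split count now uses the function's own `max_n` parameter instead of `parallel_tasks`, which belongs to the task-generation code, and each task keeps only its own chunk `n` rather than handing the whole list of splits to `func`. How the chunking behaves, as a toy example:

```python
import numpy as np

# max_n=3 splits 7 ids into chunks of sizes 3, 2, 2 (np.array_split allows
# uneven splits); task n=1 takes the middle chunk.
bus_ids = np.array([10, 11, 12, 13, 14, 15, 16])
bulks = np.array_split(bus_ids, 3)  # [array([10, 11, 12]), array([13, 14]), array([15, 16])]
my_bulk = bulks[1]                  # -> array([13, 14])
```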


