Merge pull request #22 from zsarnoczay/master

update to v2.6.1

zsarnoczay authored Oct 6, 2021
2 parents 50a9cb2 + 7ca0072 · commit 15f5b52
Showing 10 changed files with 436 additions and 268 deletions.
2 changes: 1 addition & 1 deletion pelicun/__init__.py
@@ -39,7 +39,7 @@

name = "pelicun"

__version__ = '2.6.0'
__version__ = '2.6.1'

__copyright__ = """Copyright (c) 2018 Leland Stanford Junior University and
The Regents of the University of California"""
4 changes: 2 additions & 2 deletions pelicun/auto.py
@@ -202,7 +202,7 @@ def auto_populate(DL_input_path, EDP_input_path,
DL_ap['LossModel']['Inhabitants'].update({'EventTime': event_time})

# assemble the extended DL input
DL_input.update({'BIM_Inferred': BIM_ap})
DL_input['GeneralInformation'].update(BIM_ap)
DL_input.update({'DamageAndLoss': DL_ap})

# save it to the DL file with the ap suffix
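A minimal sketch of the behavior change above (hypothetical keys, not part of the commit): inferred BIM features are now merged into the existing GeneralInformation block instead of being stored under a separate BIM_Inferred key.

    DL_input = {'GeneralInformation': {'NumberOfStories': 3}}
    BIM_ap = {'RoofShape': 'gable'}  # hypothetical inferred feature

    # after the change: extend GeneralInformation in place
    DL_input['GeneralInformation'].update(BIM_ap)
    # -> {'GeneralInformation': {'NumberOfStories': 3, 'RoofShape': 'gable'}}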
@@ -500,7 +500,7 @@ def auto_populate(DL_input_path, EDP_input_path,
}
}

if (('Inhabitants' in DL_ap['LossModel'].keys()) and
if (('Inhabitants' in loss_dict['LossModel'].keys()) and
(event_time is not None)):
loss_dict['LossModel']['Inhabitants'].update({'EventTime': event_time})

18 changes: 15 additions & 3 deletions pelicun/control.py
@@ -329,7 +329,7 @@ def calculate_losses(self):
log_msg('Calculating losses...')
self._DV_dict = {}

def save_outputs(self, output_path, EDP_file, DM_file, DV_file,
def save_outputs(self, output_path, BIM_file, EDP_file, DM_file, DV_file,
suffix="", detailed_results=True):
"""
Export the results.
@@ -459,6 +459,11 @@ def replace_FG_IDs_with_FG_names(df):
stats_only=True)

#if True:
# create the BIM file
log_msg('\t\tSimCenter BIM file')
write_SimCenter_BIM_output(
output_path, suffix + BIM_file, self._AIM_in['GI'])

# create the EDP file
if self._assessment_type.startswith('HAZUS'):
log_msg('\t\tSimCenter EDP file')
@@ -3247,7 +3252,7 @@ def aggregate_results(self):

# reconstruction cost
if DVs['rec_cost']:
if self._hazard == 'HU'and ('PWS' in self._EDP_in) and ('PIH' in self._EDP_in):
if self._hazard == 'HU' and ('PWS' in self._EDP_in) and ('PIH' in self._EDP_in):
# if running hurricane with combined wind and flood hazard
# individual losses
indiv_loss = self._DV_dict['rec_cost'].groupby(level=[0], axis=1).sum()
@@ -3262,8 +3267,15 @@
tmp1 = (loss_weight[0][i] * rlz[0]) / 100.
tmp2 = (loss_weight[1][i] * rlz[1]) / 100.
combined_loss.append(np.min([100., (np.sum(tmp1 + tmp2) - tmp1.T.dot(tmp2))* 100.]))
SUMMARY.loc[ncID, ('reconstruction', 'cost')] = combined_loss
# convert to loss ratio
# combined
SUMMARY.loc[ncID, ('reconstruction', 'cost')] = [x / 100.0 for x in combined_loss]
# individual
self._DV_dict['rec_cost'] = self._DV_dict['rec_cost'] / 100.0
else:
# convert to loss ratio
if self._hazard == 'HU':
self._DV_dict['rec_cost'] = self._DV_dict['rec_cost'] / 100.0
SUMMARY.loc[ncID, ('reconstruction', 'cost')] = \
self._DV_dict['rec_cost'].sum(axis=1)
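The capped sum-minus-product above implements the usual rule for combining losses from two hazards that cannot jointly destroy more than the full building: in loss-ratio terms, L_combined = min(1, L_wind + L_flood - L_wind * L_flood). A self-contained numeric sketch (made-up loss ratios; the per-subassembly loss_weight bookkeeping from the commit is omitted):

    import numpy as np

    # hypothetical wind and flood loss ratios for one realization
    L_wind = np.array([0.20])   # 20% of replacement value
    L_flood = np.array([0.50])  # 50% of replacement value

    combined = np.min([1.0, np.sum(L_wind + L_flood) - L_wind.T.dot(L_flood)])
    print(combined)  # 0.6, i.e. a 60% combined loss ratio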

188 changes: 120 additions & 68 deletions pelicun/file_io.py
@@ -63,7 +63,6 @@

import json, posixpath

from tables.exceptions import HDF5ExtError
from time import sleep


@@ -133,6 +132,75 @@ def process_loc(string, stories):
else:
return None

def get_required_resources(input_path, assessment_type):
"""
List the data files required to perform an assessment.
It extracts the information from the config file about the methods and
functional data required for the analysis and provides a list of paths to
the files that would be used.
This method is helpful in an HPC context to copy the required resources to
the local node from the shared file storage.
Parameters
----------
input_path: string
Location of the DL input json file.
assessment_type: {'P58', 'HAZUS_EQ', 'HAZUS_HU'}
Identifies the default databases based on the type of assessment.
Returns
-------
resources: list of strings
A list of paths to the required resource files.
"""

resources = {}

AT = assessment_type

with open(input_path, 'r') as f:
jd = json.load(f)

DL_input = jd['DamageAndLoss']

loss = DL_input.get('LossModel', None)
if loss is not None:
inhabitants = loss.get('Inhabitants', None)
dec_vars = loss.get('DecisionVariables', None)

if dec_vars is not None:
injuries = bool(dec_vars.get('Injuries', False))
else:
inhabitants = None
dec_vars = None
injuries = False

# check if the user specified custom data sources
path_CMP_data = DL_input.get("ComponentDataFolder", "")

if path_CMP_data == "":
# use the default component data path for this assessment type
path_CMP_data = pelicun_path + CMP_data_path[AT]

resources.update({'component': path_CMP_data})

# HAZUS combination of flood and wind losses
if ((AT == 'HAZUS_HU') and (DL_input.get('Combinations', None) is not None)):
path_combination_data = pelicun_path + CMP_data_path['HAZUS_MISC']
resources.update({'combination': path_combination_data})

# The population data is only needed if we are interested in injuries
if inhabitants is not None:
path_POP_data = inhabitants.get("PopulationDataFile", "")
else:
path_POP_data = ""

if ((injuries) and (path_POP_data == "")):
path_POP_data = pelicun_path + POP_data_path[AT]
resources.update({'population': path_POP_data})

return resources
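A usage sketch for the new helper in an HPC staging script (the input path and node-local directory are made-up examples, not part of the commit):

    import os, shutil

    resources = get_required_resources('inputs/DL_input.json', 'HAZUS_HU')

    # stage every required database on fast node-local storage before the run
    for label, src in resources.items():
        dst = os.path.join('/tmp/node_local', os.path.basename(src))
        if os.path.isdir(src):
            shutil.copytree(src, dst)  # e.g. a component data folder
        else:
            shutil.copy(src, dst)      # e.g. an HDF5 database file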

def read_SimCenter_DL_input(input_path, assessment_type='P58', verbose=False):
"""
@@ -272,8 +340,9 @@ def read_SimCenter_DL_input(input_path, assessment_type='P58', verbose=False):
comb = DL_input.get('Combinations', None)
path_combination_data = pelicun_path
if comb is not None:
if AT == 'HAZUS_HU':
path_combination_data += CMP_data_path['HAZUS_MISC']
path_combination_data = DL_input.get('CombinationDataFile', None)
if path_combination_data is None:
path_combination_data = pelicun_path + CMP_data_path['HAZUS_MISC']
data['data_sources'].update({'path_combination_data': path_combination_data})
data['loss_combination'] = comb
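With this change a custom combination table can be supplied through the input file; a hypothetical DL input fragment (the CombinationDataFile key is the one read above, while the shape of the Combinations value is an assumption here, since the code only checks that the key exists):

    DL_input = {
        'DamageAndLoss': {
            'Combinations': ['WindAndFlood'],  # assumed shape
            'CombinationDataFile': 'my_subassembly_loss_ratios.hdf',
        }
    }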

@@ -291,6 +360,7 @@

# general information
GI = jd.get("GeneralInformation", None)
data['GI'] = GI

# units
if (GI is not None) and ('units' in GI.keys()):
@@ -968,28 +1038,15 @@ def read_population_distribution(path_POP, occupancy, assessment_type='P58',
# else if an HDF5 file is provided
elif path_POP.endswith('hdf'):

# this for loop is needed to avoid issues from race conditions on HPC
for i in range(1000):
try:
store = pd.HDFStore(path_POP)
store.open()

except HDF5ExtError:
pop_table = None
sleep(0.01)
continue

else:
pop_table = store.select('pop',
where=f'index in {[occupancy, ]}')
store.close()
break
store = pd.HDFStore(path_POP)
store.open()
pop_table = store.select('pop', where=f'index in {[occupancy, ]}')
store.close()

if pop_table is not None:
data = convert_Series_to_dict(pop_table.loc[occupancy, :])
else:
raise IOError("Couldn't read the HDF file for POP data after 20 "
"tries because it was blocked by other processes.")
raise IOError("Couldn't read the HDF file for POP data.")

# convert peak population to persons/m2
if 'peak' in data.keys():
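The simplified reads above replace the HPC retry loops with a single pandas HDFStore.select call using a where filter. A toy, self-contained sketch of the same query pattern (file name and index values are made up; assumes a table-format store):

    import pandas as pd

    # build a tiny table-format store to query
    df = pd.DataFrame({'peak': [0.01, 0.02]}, index=['RES1', 'COM1'])
    df.to_hdf('toy_pop.hdf', key='pop', format='table')

    store = pd.HDFStore('toy_pop.hdf')
    store.open()
    pop_table = store.select('pop', where=f'index in {["RES1",]}')
    store.close()
    print(pop_table)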
@@ -1038,24 +1095,17 @@ def read_combination_DL_data(path_combination_data, comp_info, assessment_type='
## TODO: hdf type
elif path_combination_data.endswith('hdf'):
for c_id in comp_info:
for i in range(4000):
try:
store = pd.HDFStore(path_combination_data)
store.open()
except HDF5ExtError:
comb_data_table = None
sleep(0.1)
continue
else:
comb_data_table = store['HAZUS Subassembly Loss Ratio']
store.close()
break

store = pd.HDFStore(path_combination_data)
store.open()
comb_data_table = store['HAZUS Subassembly Loss Ratio']
store.close()

if comb_data_table is not None:
comb_data_dict.update(
{c_id: {'LossRatio': comb_data_table[c_id].tolist()}})
else:
raise IOError("Couldn't read the HDF file for DL data after 20 "
"tries because it was blocked by other processes.")
raise IOError("Couldn't read the HDF file for combination data.")

return comb_data_dict

@@ -1142,53 +1192,30 @@ def read_component_DL_data(path_CMP, comp_info, assessment_type='P58', avail_edp
path_CMP_m = path_CMP.replace('.hdf','_FL.hdf') # flood DL
else:
path_CMP_m = path_CMP.replace('.hdf','_HU.hdf') # wind DL
# this for loop is needed to avoid issues from race conditions on HPC
for i in range(10000):
try:
store = pd.HDFStore(path_CMP_m)
store.open()

except HDF5ExtError:
CMP_table = None
sleep(0.1)
continue

else:
CMP_table = store.select('data', where=f'index in {c_id}')
store.close()
break
store = pd.HDFStore(path_CMP_m)
store.open()
CMP_table = store.select('data', where=f'index in {c_id}')
store.close()

if CMP_table is not None:
DL_data_dict.update(
{c_id: convert_Series_to_dict(CMP_table.loc[c_id, :])})
else:
raise IOError("Couldn't read the HDF file for DL data after iterative "
"tries because it was blocked by other processes.")
raise IOError("Couldn't read the HDF file for DL data.")
else:
# this for loop is needed to avoid issues from race conditions on HPC
for i in range(1000):
try:
store = pd.HDFStore(path_CMP)
store.open()

except HDF5ExtError:
CMP_table = None
sleep(0.1)
continue

else:
CMP_table = store.select('data', where=f'index in {s_cmp_keys}')
store.close()
break
store = pd.HDFStore(path_CMP)
store.open()
CMP_table = store.select('data', where=f'index in {s_cmp_keys}')
store.close()

if CMP_table is not None:
for c_id in s_cmp_keys:
DL_data_dict.update(
{c_id: convert_Series_to_dict(CMP_table.loc[c_id, :])})
else:
raise IOError("Couldn't read the HDF file for DL data after 20 "
"tries because it was blocked by other processes.")

raise IOError("Couldn't read the HDF file for DL data.")

else:
raise ValueError(
@@ -1437,6 +1464,31 @@ def write_SimCenter_DL_output(output_dir, output_filename, output_df, index_name
#with open(file_path[:-3]+'zip', 'w') as f:
# output_df.to_csv(f, compression=dict(method='zip', archive_name=output_filename))

def write_SimCenter_BIM_output(output_dir, BIM_filename, BIM_dict):

# flatten the dictionary: nested items (e.g., units) become <key>_<sub_key> columns
BIM_flat_dict = {}
for key, item in BIM_dict.items():
if isinstance(item, dict):
for sub_key, sub_item in item.items():
BIM_flat_dict.update({f'{key}_{sub_key}': sub_item})
else:
BIM_flat_dict.update({key: [item,]})

# create the output DF
#BIM_flat_dict.update({"index": [0,]})
# remove spatial data that does not fit the flat table
for header_to_remove in ['geometry', 'Footprint']:
    BIM_flat_dict.pop(header_to_remove, None)

df_res = pd.DataFrame.from_dict(BIM_flat_dict)

df_res.dropna(axis=1, how='all', inplace=True)

df_res.to_csv(posixpath.join(output_dir, BIM_filename))
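A quick sketch of what the new writer produces (made-up GI contents; assumes the output folder exists):

    GI = {
        'PlanArea': 100.0,
        'NumberOfStories': 3,
        'units': {'force': 'N', 'length': 'm'},
    }
    write_SimCenter_BIM_output('.', 'BIM.csv', GI)
    # -> a single-row BIM.csv with columns
    #    PlanArea, NumberOfStories, units_force, units_length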

def write_SimCenter_EDP_output(output_dir, EDP_filename, EDP_df):

# initialize the output DF
15 changes: 14 additions & 1 deletion pelicun/tests/resources/io testing/ref/ref_DL_input_full.json
@@ -148,5 +148,18 @@
"cost_and_time": false,
"injury_lvls": true
},
"damage_logic": null
"damage_logic": null,
"GI": {
"height": 8,
"PlanArea": 111.484,
"NumberOfStories": 3,
"type": "W1",
"units": {
"force": "N",
"length": "m",
"temperature": "C",
"time": "sec"
},
"year": 1945
}
}
@@ -1,5 +1,5 @@
{
"general": {
"GI": {
"plan_area": 111.484,
"stories": 3,
"realizations": 20000,
@@ -109,5 +109,15 @@
"rec_times": "IND",
"red_tags": "IND"
},
"damage_logic": null
"damage_logic": null,
"GI": {
"PlanArea": 100.0,
"NumberOfStories": 3,
"units": {
"force": "N",
"length": "m",
"temperature": "C",
"time": "sec"
}
}
}
6 changes: 5 additions & 1 deletion pelicun/tests/resources/io testing/ref/ref_DL_input_min.json
@@ -76,5 +76,9 @@
"rec_times": "IND",
"red_tags": "IND"
},
"damage_logic": null
"damage_logic": null,
"GI": {
"PlanArea": 100.0,
"NumberOfStories": 3
}
}