diff --git a/.github/workflows/deploy_to_pypi.yaml b/.github/workflows/deploy_to_pypi.yaml
new file mode 100644
index 000000000..ff5577e4a
--- /dev/null
+++ b/.github/workflows/deploy_to_pypi.yaml
@@ -0,0 +1,32 @@
+name: Deploy to PyPI
+
+on:
+ release:
+ types: [created]
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+
+ - name: Set up Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: '3.10'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install setuptools wheel twine
+
+ - name: Build package
+ run: |
+ python setup.py sdist bdist_wheel
+
+ - name: Publish package to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: zsarnoczay
+ password: ${{ secrets.PELICUN_GITHUB_TOKEN }}
diff --git a/pelicun/__init__.py b/pelicun/__init__.py
index 920942f12..a67708efa 100644
--- a/pelicun/__init__.py
+++ b/pelicun/__init__.py
@@ -41,7 +41,7 @@
name = "pelicun"
-__version__ = '3.2b9'
+__version__ = '3.2'
__copyright__ = ("Copyright (c) 2018 Leland Stanford "
"Junior University and The Regents "
diff --git a/pelicun/assessment.py b/pelicun/assessment.py
index de4f1fd38..f3c8dba37 100644
--- a/pelicun/assessment.py
+++ b/pelicun/assessment.py
@@ -90,7 +90,7 @@ def __init__(self, config_options=None):
self.options = base.Options(config_options, self)
- self.unit_conversion_factors = file_io.parse_units(
+ self.unit_conversion_factors = base.parse_units(
self.options.units_file)
self.log = self.options.log
diff --git a/pelicun/base.py b/pelicun/base.py
index 030b9fdaa..67451cd95 100644
--- a/pelicun/base.py
+++ b/pelicun/base.py
@@ -57,12 +57,16 @@
int_or_None
process_loc
dedupe_index
+ dict_raise_on_duplicates
+ parse_units
+ convert_units
Options
Logger
"""
+from __future__ import annotations
import os
import sys
from datetime import datetime
@@ -1076,3 +1080,180 @@ def dedupe_index(dataframe, dtype=str):
# Placeholder for advanced calculations
'One': 'ONE'
}
+
+
+def dict_raise_on_duplicates(ordered_pairs):
+ """
+ Reject duplicate keys.
+
+ https://stackoverflow.com/questions/14902299/
+ json-loads-allows-duplicate-keys-
+ in-a-dictionary-overwriting-the-first-value
+
+ """
+ d = {}
+ for k, v in ordered_pairs:
+ if k in d:
+ raise ValueError(f"duplicate key: {k}")
+ d[k] = v
+ return d
+
+
+def parse_units(custom_file=None, preserve_categories=False):
+ """
+ Parse the unit conversion factor JSON file and return a dictionary.
+
+    Parameters
+    ----------
+    custom_file: str, optional
+        If provided, only the units specified in this file are used.
+    preserve_categories: bool, optional
+        If True, keep the first-level category grouping instead of
+        returning a flattened unit->factor dictionary.
+
+    Raises
+    ------
+    ValueError
+        If a key is defined twice, a unit conversion factor cannot
+        be interpreted as a float, or the file is not valid JSON.
+    FileNotFoundError
+        If the file does not exist.
+    TypeError
+        If a conversion factor has a type not convertible to float.
+ """
+
+ def get_contents(file_path, preserve_categories=False):
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ dictionary = json.load(f, object_pairs_hook=dict_raise_on_duplicates)
+ except FileNotFoundError as exc:
+ raise FileNotFoundError(f'{file_path} was not found.') from exc
+ except json.decoder.JSONDecodeError as exc:
+ raise ValueError(f'{file_path} is not a valid JSON file.') from exc
+ for category_dict in list(dictionary.values()):
+ # ensure all first-level keys point to a dictionary
+ if not isinstance(category_dict, dict):
+ raise ValueError(
+ f'{file_path} contains first-level keys '
+ 'that don\'t point to a dictionary'
+ )
+ # convert values to float
+ for key, val in category_dict.items():
+ try:
+ category_dict[key] = float(val)
+ except (ValueError, TypeError) as exc:
+ raise type(exc)(
+ f'Unit {key} has a value of {val} '
+ 'which cannot be interpreted as a float'
+ ) from exc
+
+ if preserve_categories:
+ return dictionary
+
+ flattened = {}
+ for category in dictionary:
+ for unit_name, factor in dictionary[category].items():
+ if unit_name in flattened:
+ raise ValueError(f'{unit_name} defined twice in {file_path}.')
+ flattened[unit_name] = factor
+
+ return flattened
+
+ if custom_file:
+ return get_contents(custom_file, preserve_categories)
+
+ return get_contents(
+ pelicun_path / "settings/default_units.json", preserve_categories
+ )
+
+
+def convert_units(
+ values: (float | list[float] | np.ndarray),
+ unit: str,
+ to_unit: str,
+ category: (str | None) = None
+) -> (float | list[float] | np.ndarray):
+ """
+ Converts numeric values between different units.
+
+ Supports conversion within a specified category of units and
+ automatically infers the category if not explicitly provided. It
+ maintains the type of the input in the output.
+
+ Parameters
+ ----------
+    values: float | list[float] | np.ndarray
+        The numeric value(s) to convert.
+    unit: str
+        The current unit of the values.
+    to_unit: str
+        The target unit to convert the values into.
+    category: str, optional
+        The category of the units (e.g., 'length', 'pressure'). If not
+        provided, the category will be inferred based on the provided
+        units.
+
+ Returns
+ -------
+    float | list[float] | np.ndarray
+ The converted value(s) in the target unit, in the same data type
+ as the input values.
+
+ Raises
+ ------
+    TypeError
+        If the input `values` are not of type float, list, or
+        np.ndarray.
+    ValueError
+        If the `unit`, `to_unit`, or `category` is unknown or if `unit`
+        and `to_unit` are not in the same category.
+
+ """
+
+ if isinstance(values, (float, list)):
+ vals = np.atleast_1d(values)
+ elif isinstance(values, np.ndarray):
+ vals = values
+ else:
+ raise TypeError('Invalid input type for `values`')
+
+ # load default units
+ all_units = parse_units(preserve_categories=True)
+
+ # if a category is given use it, otherwise try to determine it
+ if category:
+ if category not in all_units:
+ raise ValueError(f'Unknown category: `{category}`')
+ units = all_units[category]
+ for unt in unit, to_unit:
+ if unt not in units:
+ raise ValueError(
+ f'Unknown unit: `{unt}`'
+ )
+ else:
+ unit_category: (str | None) = None
+ for key in all_units:
+ units = all_units[key]
+ if unit in units:
+ unit_category = key
+ break
+ if not unit_category:
+ raise ValueError(f'Unknown unit `{unit}`')
+ units = all_units[unit_category]
+ if to_unit not in units:
+ raise ValueError(
+ f'`{unit}` is a `{unit_category}` unit, but `{to_unit}` '
+ f'is not specified in that category.'
+ )
+
+ # convert units
+ from_factor = units[unit]
+ to_factor = units[to_unit]
+ new_values = vals * from_factor / to_factor
+
+ # return the results in the same type as that of the provided
+ # values
+ if isinstance(values, float):
+ return new_values[0]
+ if isinstance(values, list):
+ return new_values.tolist()
+ return new_values
diff --git a/pelicun/file_io.py b/pelicun/file_io.py
index 27f0174e8..0e0e8fc3e 100644
--- a/pelicun/file_io.py
+++ b/pelicun/file_io.py
@@ -47,20 +47,17 @@
.. autosummary::
- dict_raise_on_duplicates
get_required_resources
save_to_csv
load_data
load_from_file
- parse_units
"""
-import json
from pathlib import Path
import numpy as np
import pandas as pd
-from . import base
+from pelicun import base
convert_dv_name = {
@@ -94,23 +91,6 @@
}
-def dict_raise_on_duplicates(ordered_pairs):
- """
- Reject duplicate keys.
-
- https://stackoverflow.com/questions/14902299/
- json-loads-allows-duplicate-keys-
- in-a-dictionary-overwriting-the-first-value
-
- """
- d = {}
- for k, v in ordered_pairs:
- if k in d:
- raise ValueError(f"duplicate key: {k}")
- d[k] = v
- return d
-
-
def save_to_csv(data, filepath, units=None, unit_conversion_factors=None,
orientation=0, use_simpleindex=True, log=None):
"""
@@ -453,66 +433,3 @@ def load_from_file(filepath, log=None):
f'to load from csv: {filepath}')
return data
-
-
-def parse_units(custom_file=None):
- """
- Parse the unit conversion factor JSON file and return a dictionary.
-
- Parameters
- ----------
- custom_file: str, optional
- If a custom file is provided, only the units specified in the
- custom file are used.
-
- Raises
- ------
- KeyError
- If a key is defined twice.
- ValueError
- If a unit conversion factor is not a float.
- FileNotFoundError
- If a file does not exist.
- Exception
- If a file does not have the JSON format.
- """
-
- def get_contents(file_path):
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- dictionary = json.load(
- f, object_pairs_hook=dict_raise_on_duplicates)
- except FileNotFoundError as exc:
- raise FileNotFoundError(
- f'{file_path} was not found.') from exc
- except json.decoder.JSONDecodeError as exc:
- raise ValueError(
- f'{file_path} is not a valid JSON file.') from exc
- for category_dict in list(dictionary.values()):
- # ensure all first-level keys point to a dictionary
- if not isinstance(category_dict, dict):
- raise ValueError(
- f'{file_path} contains first-level keys '
- 'that don\'t point to a dictionary')
- # convert values to float
- for key, val in category_dict.items():
- try:
- category_dict[key] = float(val)
- except (ValueError, TypeError) as exc:
- raise type(exc)(
- f'Unit {key} has a value of {val} '
- 'which cannot be interpreted as a float') from exc
-
- flattened = {}
- for category in dictionary:
- for unit_name, factor in dictionary[category].items():
- if unit_name in flattened:
- raise ValueError(f'{unit_name} defined twice in {file_path}.')
- flattened[unit_name] = factor
-
- return flattened
-
- if custom_file:
- return get_contents(custom_file)
-
- return get_contents(base.pelicun_path / "settings/default_units.json")
diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_water.csv b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_water.csv
new file mode 100644
index 000000000..ae88d898e
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_water.csv
@@ -0,0 +1,5 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-DamageStateWeights
+PWP.B.GS,0,Peak Ground Velocity,cmps,0,0,multilinear_CDF,"0.1,2,3,4,5,6,8,10,15,20,30,40,50,60,80,100,150,200,300,400,500,579|0.00,0.00000290,0.00000722,0.00001379,0.00002279,0.00003435,0.00006561,0.00010840,0.00026993,0.00051566,0.00128401,0.00245290,0.00405254,0.00610781,0.01166803,0.01927724,0.04800100,0.09169855,0.22833253,0.43619425,0.72065511,1.00",0.8 | 0.2
+PWP.D.GS,0,Peak Ground Velocity,cmps,0,0,multilinear_CDF,"0.2,3,4,5,6,8,10,15,20,30,40,50,60,80,100,150,200,300,400,500,600,800,990|0.00,0.00000217,0.00000414,0.00000684,0.00001030,0.00001968,0.00003252,0.00008098,0.00015470,0.00038520,0.00073587,0.00121576,0.00183234,0.00350041,0.00578317,0.01440030,0.02750956,0.06849976,0.13085828,0.21619653,0.32584160,0.62247037,1.00",0.8 | 0.2
+PWP.B.GF,0,Permanent Ground Deformation,inch,0,0,multilinear_CDF,"0.008,0.1,0.2,0.3,0.4,0.5,0.6,0.8,1,2,3,4,5,6,8,10,15,20,30,40,50,60,80,100,150,200,300,400,500,600,800,1000,2000,3000,4000,5000,6000,8000,9050|0.00,0.00167898,0.00247527,0.00310622,0.00364921,0.00413493,0.00457941,0.00537991,0.00609600,0.00898715,0.01127802,0.01324947,0.01501302,0.01662684,0.01953329,0.02213324,0.02777513,0.03263035,0.04094802,0.04810592,0.05450899,0.06036840,0.07092108,0.08036093,0.10084539,0.11847365,0.14867326,0.17466205,0.19791019,0.21918441,0.25749888,0.29177290,0.43015181,0.53979998,0.63415957,0.71856848,0.79581049,0.93492194,1.00",0.2 | 0.8
+PWP.D.GF,0,Permanent Ground Deformation,inch,0,0,multilinear_CDF,"0.3,0.4,0.5,0.6,0.8,1,2,3,4,5,6,8,10,15,20,30,40,50,60,80,100,150,200,300,400,500,600,800,1000,2000,3000,4000,5000,6000,8000,10000,20000,30000,40000,50000,60000,80000|0.00,0.00109476,0.00124048,0.00137382,0.00161397,0.00182880,0.00269614,0.00338341,0.00397484,0.00450391,0.00498805,0.00585999,0.00663997,0.00833254,0.00978911,0.01228440,0.01443178,0.01635270,0.01811052,0.02127632,0.02410828,0.03025362,0.03554209,0.04460198,0.05239862,0.05937306,0.06575532,0.07724967,0.08753187,0.12904554,0.16193999,0.19024787,0.21557054,0.23874315,0.28047658,0.31780902,0.46853605,0.58796858,0.69074827,0.78268933,0.86682397,1.00",0.2 | 0.8
\ No newline at end of file
diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.csv b/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.csv
new file mode 100644
index 000000000..549d1b486
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.csv
@@ -0,0 +1,65 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS1-DamageStateWeights
+RWC.001,0,Peak Tension Force,kN,0,1,normal,5.84,0.1,
+RWC.STR.001,0,Peak Tension Force,kN,0,1,normal,5.338,0.3,
+RWC.TN.001a,0,Peak Tension Force,kN,0,1,normal,2.98,0.3,
+RWC.TN.001b,0,Peak Tension Force,kN,0,1,normal,1.92,0.23,
+RWC.TN.001c,0,Peak Tension Force,kN,0,1,normal,1.846,0.25,
+RWC.TN.STR.001,0,Peak Tension Force,kN,0,1,normal,7.31,0.1,
+RWC.TN.STR.002,0,Peak Tension Force,kN,0,1,normal,8.47,0.11,
+RWC.TN.STR.003,0,Peak Tension Force,kN,0,1,normal,14.35,0.1,
+RWC.TN.STR.004,0,Peak Tension Force,kN,0,1,normal,9.22,0.11,
+RWC.TN.STR.005,0,Peak Tension Force,kN,0,1,normal,8.56,0.11,
+RWC.TN.STR.006,0,Peak Tension Force,kN,0,1,normal,13.41,0.09,
+RWC.TN.STR.007,0,Peak Tension Force,kN,0,1,normal,12.21,0.12,
+RWC.EPX.001,0,Peak Tension Force,kN,0,1,normal,7.53,0.37,
+RWC.TN.EPX.001,0,Peak Tension Force,kN,0,1,normal,7.98,0.29,
+RWC.TN.EPX.002,0,Peak Tension Force,kN,0,1,normal,6.55,0.16,
+RWC.TN.EPX.003,0,Peak Tension Force,kN,0,1,normal,6.11,0.19,
+RWC.TN.EPX.004,0,Peak Tension Force,kN,0,1,normal,9.67,0.34,
+RWC.TN.ADH.001,0,Peak Tension Force,kN,0,1,normal,6.6,0.19,
+RWC.TN.ADH.002,0,Peak Tension Force,kN,0,1,normal,4.95,0.33,
+RWC.TN.ADH.003,0,Peak Tension Force,kN,0,1,normal,5.21,0.21,
+RWC.TN.ADH.004,0,Peak Tension Force,kN,0,1,normal,2.81,0.17,
+RWC.TN.ADH.005,0,Peak Tension Force,kN,0,1,normal,4.1,0.31,
+RWC.TN.ADH.006,0,Peak Tension Force,kN,0,1,normal,3.79,0.09,
+RWC.TN.002,0,Peak Tension Force,kN,0,1,normal,12.56,0.1,
+RWC.HCL.001a,0,Peak Tension Force,kN,0,1,normal,5.836,0.12,
+RWC.HCL.001b,0,Peak Tension Force,kN,0,1,normal,5.836,0.1,
+RWC.HCL.001c,0,Peak Tension Force,kN,0,1,normal,5.391,0.15,
+RWC.TN.003a,0,Peak Tension Force,kN,0,1,normal,1.557,0.16,
+RWC.TN.003b,0,Peak Tension Force,kN,0,1,normal,1.51,0.36,
+RWC.TN.004,0,Peak Tension Force,kN,0,1,normal,1.83,0.34,
+RWC.TN.005,0,Peak Tension Force,kN,0,1,normal,1.97,0.38,
+RWC.TN.006a,0,Peak Tension Force,kN,0,1,normal,1.56,0.164,
+RWC.TN.006b,0,Peak Tension Force,kN,0,1,normal,2.59,0.212,
+RWC.TN.006c,0,Peak Tension Force,kN,0,1,normal,2.69,0.155,
+WIN.001a,0,Peak Wind Pressure,kPa,0,1,normal,3.35,0.3,
+WIN.001b,0,Peak Wind Pressure,kPa,0,1,normal,2.4,0.2,
+WIN.002,0,Peak Wind Pressure,kPa,0,1,weibull,2.629,4.7,
+WIN.003,0,Peak Wind Pressure,kPa,0,1,weibull,1.853,4.8,
+WIN.004,0,Peak Wind Pressure,kPa,0,1,normal,6.7,0.3,
+WIN.005,0,Peak Wind Pressure,kPa,0,1,normal,5.03,0.3,
+WIN.006,0,Peak Wind Pressure,kPa,0,1,normal,2.61,0.25,
+WIN.007a,0,Peak Wind Pressure,kPa,0,1,normal,1.534,0.25,
+WIN.007b,0,Peak Wind Pressure,kPa,0,1,normal,2.454,0.25,
+WIN.008,0,Peak Wind Pressure,kPa,0,1,normal,4.602,0.25,
+WIN.009,0,Peak Wind Pressure,kPa,0,1,normal,5,0.2,
+WIN.010,0,Peak Wind Pressure,kPa,0,1,normal,3.33,0.2,
+WIN.011,0,Peak Wind Pressure,kPa,0,1,normal,2.5,0.2,
+WIN.012,0,Peak Wind Pressure,kPa,0,1,normal,1.78,0.2,
+DOR.REG.001a,0,Peak Wind Pressure,kPa,0,1,normal,3.59,0.3,
+DOR.REG.001b,0,Peak Wind Pressure,kPa,0,1,normal,3.6,0.2,
+DOR.REG.001c,0,Peak Wind Pressure,kPa,0,1,normal,2.394,0.2,
+DOR.REG.002,0,Peak Wind Pressure,kPa,0,1,normal,7.18,0.3,
+DOR.REG.003,0,Peak Wind Pressure,kPa,0,1,normal,5.41,0.3,
+DOR.REG.004a,0,Peak Wind Pressure,kPa,0,1,normal,2.45,0.25,
+DOR.REG.004b,0,Peak Wind Pressure,kPa,0,1,weibull,4.86,4.5,
+DOR.REG.005,0,Peak Wind Pressure,kPa,0,1,normal,4.79,0.2,
+DOR.GAR.001a,0,Peak Wind Pressure,kPa,0,1,normal,2.39,0.3,
+DOR.GAR.001b,0,Peak Wind Pressure,kPa,0,1,normal,2.5,0.2,
+DOR.GAR.001c,0,Peak Wind Pressure,kPa,0,1,normal,3.49,0.2,
+DOR.GAR.001d,0,Peak Wind Pressure,kPa,0,1,normal,1.436,0.2,
+DOR.GAR.002,0,Peak Wind Pressure,kPa,0,1,normal,3.59,0.3,
+DOR.GAR.003,0,Peak Wind Pressure,kPa,0,1,normal,2.49,0.2,
+DOR.GAR.004,0,Peak Wind Pressure,kPa,0,1,normal,0.479,0.2,
+DOR.GAR.005,0,Peak Wind Pressure,kPa,0,1,normal,0.958,0.2,
\ No newline at end of file
diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.json b/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.json
new file mode 100644
index 000000000..837e96746
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/damage_DB_SimCenter_HU.json
@@ -0,0 +1,556 @@
+{
+ "_GeneralInformation": {
+ "ShortName": "SimCenter Hurricane Component Library",
+ "Description": "This dataset is a collection of component-level damage models from the literature and public resources. Every entry has one or more corresponding publications identified that provide more information about its calibration and application limits.",
+ "Version": "0.1",
+ "ComponentGroups": {
+ "RWC - Roof-Wall Connection": {
+ "RWC.EPX - Epoxy only": [],
+ "RWC.HCL - Hurricane Clip": [],
+ "RWC.STR - Straps only": [],
+ "RWC.TN - Toe nail": [
+ "RWC.TN.STR - Toe nail with straps",
+ "RWC.TN.EPX - Toe nail and epoxy",
+ "RWC.TN.ADH - Toe nail with adhesive"
+ ]
+ },
+ "WIN - Windows": [],
+ "DOR - Doors": [
+ "DOR.REG - Regular Doors",
+ "DOR.GAR - Garage Doors"
+ ]
+ }
+ },
+ "References":{
+ "canfield1991": "Canfield, L. R., Niu, S. H., & Liu, H. (1991). Uplift resistance of various rafter-wall connections. Forest products journal, 41(7-8), 27-34.",
+ "cheng2004": "Cheng, J. (2004). Testing and analysis of the toe-nailed connection in the residential roof-to-wall system. Forest products journal, 54(4).",
+ "dong2016": "Dong, Y., & Li, Y. (2016). Risk-based assessment of wood residential construction subjected to hurricane events considering indirect and environmental loss. Sustainable and Resilient Infrastructure, 1(1-2), 46-62.",
+ "gurley2005": "Gurley, K., Pinelli, J.-P., Subramanian, C., Cope, A., Zhang, L., Murphree, J., Artiles, A., Misra, P., Gulati, S., and Simiu, E. (2005). Florida Public Hurricane Loss Projection Model engineering team final report volume II: Predicting the vulnerability of typical residential buildings to hurricane damage. Technical report, International Hurricane Research Center, Florida International University",
+ "jain2020": "Jain, A., Bhusar, A. A., Roueche, D. B., & Prevatt, D. O. (2020). Engineering-based tornado damage assessment: numerical tool for assessing tornado vulnerability of residential structures. Frontiers in Built Environment, 6, 89.",
+ "li2006": "Li, Y., & Ellingwood, B. R. (2006). Hurricane damage to residential construction in the US: Importance of uncertainty modeling in risk assessment. Engineering structures, 28(7), 1009-1018. Li, Y., van de Lindt, J. W., Dao, T., Bjarnadottir, S., & Ahuja, A. (2012). Loss analysis for combined wind and surge in hurricanes. Natural hazards review, 13(1), 1-10.",
+ "peng2013": "Peng, Jiazhen. Modeling natural disaster risk management: Integrating the roles of insurance and retrofit and multiple stakeholder perspectives. University of Delaware, 2013.",
+ "reed1996": "Reed, T. D., Rosowsky, D. V., & Schiff, S. D. (1996). Roof rafter to top-plate connections in coastal residential construction. In International Wood Engineering Conference (pp. 4-458).",
+ "reed1997": "Reed, T. D., Rosowsky, D. V., & Schiff, S. D. (1997). Uplift capacity of light-frame rafter to top plate connections. Journal of architectural engineering, 3(4), 156-163.",
+ "shanmugam2009": "Shanmugam, B., Nielson, B. G., & Prevatt, D. O. (2009). Statistical and analytical models for roof components in existing light-framed wood structures. Engineering Structures, 31(11), 2607-2616.",
+ "vandelindt2013": "van de Lindt, J. W., Pei, S., Dao, T., Graettinger, A., Prevatt, D. O., Gupta, R., & Coulbourne, W. (2013). Dual-objective-based tornado design philosophy. Journal of Structural Engineering, 139(2), 251-263.",
+ "vickery2006": "Vickery, P. J., Skerlj, P. F., Lin, J., Twisdale Jr, L. A., Young, M. A., & Lavelle, F. M. (2006). HAZUS-MH hurricane model methodology. II: Damage and loss estimation. Natural Hazards Review, 7(2), 94-103."
+ },
+ "RWC.001": {
+ "Description": "Roof-Wall Connection",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "dong2016",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.STR.001": {
+ "Description": "Roof-Wall Connection with Straps",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "vickery2006",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.001a": {
+ "Description": "Roof-Wall Connection with Toe nails",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "peng2013",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.001b": {
+ "Description": "Roof-Wall Connection with Toe nails",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["reed1997","shanmugam2009"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.001c": {
+ "Description": "Roof-Wall Connection with Toe nails",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "vickery2006",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.001": {
+ "Description": "Roof-Wall Connection with Toe nails and a small strap inside",
+ "Comments": "The small strap is located on the inside of the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.002": {
+ "Description": "Roof-Wall Connection with Toe nails and a small strap outside",
+ "Comments": "The small strap is located on the outside of the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.003": {
+ "Description": "Roof-Wall Connection with Toe nails and small straps on both sides",
+ "Comments": "Two small straps are installed, one inside and one outside of the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.004": {
+ "Description": "Roof-Wall Connection with Toe nails, a small strap outside, and plywood nailed.",
+ "Comments": "The small strap is located on the outside of the wall. A plywood is nailed to the plate on the wall as spacer.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.005": {
+ "Description": "Roof-Wall Connection with Toe nails, a small strap outside, and plywood not nailed.",
+ "Comments": "The small strap is located on the outside of the wall. A plywood is installed as spacer but not nailed to the plate on the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.006": {
+ "Description": "Roof-Wall Connection with Toe nails and a large strap outside",
+ "Comments": "The large strap is located on the outside of the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.STR.007": {
+ "Description": "Roof-Wall Connection with Toe nails and a large strap inside",
+ "Comments": "The large strap is located on the inside of the wall.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.EPX.001": {
+ "Description": "Roof-Wall Connection with Epoxy.",
+ "Comments": "Epoxy set for one week, no toe nails used.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.EPX.001": {
+ "Description": "Roof-Wall Connection with Toe nails and epoxy.",
+ "Comments": "Epoxy set for one week, toe-nailed when epoxy is wet.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.EPX.002": {
+ "Description": "Roof-Wall Connection with Toe nails and epoxy on small wood blocks.",
+ "Comments": "Epoxy set for one week.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.EPX.003": {
+ "Description": "Roof-Wall Connection with Toe nails and epoxy on small wood blocks.",
+ "Comments": "Epoxy set for 24-48 hrs.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.EPX.004": {
+ "Description": "Roof-Wall Connection with Toe nails and epoxy on large wood blocks.",
+ "Comments": "Epoxy set for one week.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.001": {
+ "Description": "Roof-Wall Connection with Toe nails and acrylic adhesive",
+ "Comments": "Adhesive set for one week, toe-nailed when adhesive is wet.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.002": {
+ "Description": "Roof-Wall Connection with Toe nails and soaked with acrylic adhesive.",
+ "Comments": "Wood first soaked for 24 hours, then toe-nailed when adhesive is wet.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.003": {
+ "Description": "Roof-Wall Connection with Toe nails and acrylic adhesive on small wood blocks.",
+ "Comments": "Adhesive set for one week.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.004": {
+ "Description": "Roof-Wall Connection with Toe nails and 1/4 inch acrylic adhesive bead",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.005": {
+ "Description": "Roof-Wall Connection with Toe nails and 1/2 inch acrylic adhesive bead",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.ADH.006": {
+ "Description": "Roof-Wall Connection with Toe nails and foaming polyurethane adhesive.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "reed1997",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.002": {
+ "Description": "Roof-Wall Connection with Toe nails and Simpson Strong-Tie H10.",
+ "Comments": "Simpson Strong-Tie H10 metal connector installed on every truss.",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "peng2013",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.HCL.001a": {
+ "Description": "Roof-Wall Connection with Hurricane Clip H2.5.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "vandelindt2013",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.HCL.001b": {
+ "Description": "Roof-Wall Connection with Hurricane Clip H2.5.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["li2006","canfield1991"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.HCL.001c": {
+ "Description": "Roof-Wall Connection with Hurricane Clip H2.5.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["li2006","canfield1991"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.003a": {
+ "Description": "Roof-Wall Connection with Toe nails, 2-16d layout.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "vandelindt2013",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.003b": {
+ "Description": "Roof-Wall Connection with Toe nails, 2-16d layout.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": "shanmugam2009",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.004": {
+ "Description": "Roof-Wall Connection with Toe nails, 3-8d layout.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["li2006","reed1996"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.005": {
+ "Description": "Roof-Wall Connection with Toe nails, 3-16d layout.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["jain2020","shanmugam2009"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.006a": {
+ "Description": "Roof-Wall Connection with Toe nails, 2-16d layout, box nails.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["shanmugam2009","cheng2004"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.006b": {
+ "Description": "Roof-Wall Connection with Toe nails, 2-16d layout, box nails.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["shanmugam2009","cheng2004"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "RWC.TN.006c": {
+ "Description": "Roof-Wall Connection with Toe nails, 2-16d layout, box nails.",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["shanmugam2009","cheng2004"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Tension from uplift"
+ }
+ }
+ }
+ },
+ "WIN.001a": {
+ "Description": "Windows - General fragility",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["peng2013","gurley2005"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Glass breaking"
+ }
+ }
+ }
+ },
+ "DOR.REG.001a": {
+ "Description": "Doors - General fragility",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["peng2013","gurley2005"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Door panel torn from hinges"
+ }
+ }
+ }
+ },
+ "DOR.GAR.001a": {
+ "Description": "Garage Door - General fragility",
+ "Comments": "...",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "Reference": ["peng2013","gurley2005"],
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Garage door torn from tracks"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py
index 62e23b0d1..6c957ce74 100644
--- a/pelicun/resources/auto/Hazus_Earthquake_IM.py
+++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py
@@ -1,539 +1,829 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023 Leland Stanford Junior University
-# Copyright (c) 2023 The Regents of the University of California
-#
-# This file is part of pelicun.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its contributors
-# may be used to endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# You should have received a copy of the BSD 3-Clause License along with
-# pelicun. If not, see .
-#
-# Contributors:
-# Adam Zsarnóczay
-
-import pandas as pd
-
-ap_DesignLevel = {
- 1940: 'PC',
- 1940: 'LC',
- 1975: 'MC',
- 2100: 'HC'
-}
-
-ap_DesignLevel_W1 = {
- 0: 'PC',
- 0: 'LC',
- 1975: 'MC',
- 2100: 'HC'
-}
-
-ap_Occupancy = {
- 'Other/Unknown': 'RES3',
- 'Residential - Single-Family': 'RES1',
- 'Residential - Town-Home': 'RES3',
- 'Residential - Multi-Family': 'RES3',
- 'Residential - Mixed Use': 'RES3',
- 'Office': 'COM4',
- 'Hotel': 'RES4',
- 'School': 'EDU1',
- 'Industrial - Light': 'IND2',
- 'Industrial - Warehouse': 'IND2',
- 'Industrial - Heavy': 'IND1',
- 'Retail': 'COM1',
- 'Parking' : 'COM10'
-}
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023 Leland Stanford Junior University
+# Copyright (c) 2023 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+
+import os
+import json
+import pandas as pd
+import pelicun
+
+ap_DesignLevel = {
+ 1940: 'PC',
+ 1940: 'LC',
+ 1975: 'MC',
+ 2100: 'HC'
+}
+
+ap_DesignLevel_W1 = {
+ 0: 'PC',
+ 0: 'LC',
+ 1975: 'MC',
+ 2100: 'HC'
+}
+
+ap_Occupancy = {
+ 'Other/Unknown': 'RES3',
+ 'Residential - Single-Family': 'RES1',
+ 'Residential - Town-Home': 'RES3',
+ 'Residential - Multi-Family': 'RES3',
+ 'Residential - Mixed Use': 'RES3',
+ 'Office': 'COM4',
+ 'Hotel': 'RES4',
+ 'School': 'EDU1',
+ 'Industrial - Light': 'IND2',
+ 'Industrial - Warehouse': 'IND2',
+ 'Industrial - Heavy': 'IND1',
+ 'Retail': 'COM1',
+ 'Parking' : 'COM10'
+}
+
+# Convert common length units
+def convertUnits(value, unit_in, unit_out):
+ aval_types = ['m', 'mm', 'cm', 'km', 'inch', 'ft', 'mile']
+ m = 1.
+ mm = 0.001 * m
+ cm = 0.01 * m
+ km = 1000 * m
+ inch = 0.0254 * m
+ ft = 12. * inch
+ mile = 5280. * ft
+ scale_map = {'m':m, 'mm':mm, 'cm':cm, 'km':km, 'inch':inch, 'ft':ft,\
+ 'mile':mile}
+ if (unit_in not in aval_types) or (unit_out not in aval_types):
+ print(f"The unit {unit_in} or {unit_out} are used in auto_population but not supported")
+ return
+ value = value*scale_map[unit_in]/scale_map[unit_out]
+ return value
def convertBridgeToHAZUSclass(AIM):
#TODO: replace labels in AIM with standard CamelCase versions
- structureType = AIM["BridgeClass"]
- # if type(structureType)== str and len(structureType)>3 and structureType[:3] == "HWB" and 0 < int(structureType[3:]) and 29 > int(structureType[3:]):
- # return AIM["bridge_class"]
- state = AIM["StateCode"]
+ structureType = AIM["BridgeClass"]
+ # if type(structureType)== str and len(structureType)>3 and structureType[:3] == "HWB" and 0 < int(structureType[3:]) and 29 > int(structureType[3:]):
+ # return AIM["bridge_class"]
+ state = AIM["StateCode"]
yr_built = AIM["YearBuilt"]
num_span = AIM["NumOfSpans"]
- len_max_span = AIM["MaxSpanLength"]
+ len_max_span = AIM["MaxSpanLength"]
+ len_unit = AIM["units"]["length"]
+ len_max_span = convertUnits(len_max_span, len_unit, "m")
+
seismic = ((int(state)==6 and int(yr_built)>=1975) or
(int(state)!=6 and int(yr_built)>=1990))
-
- # Use a catch-all, other class by default
- bridge_class = "HWB28"
-
- if len_max_span > 150:
- if not seismic:
- bridge_class = "HWB1"
- else:
- bridge_class = "HWB2"
-
- elif num_span == 1:
- if not seismic:
- bridge_class = "HWB3"
- else:
- bridge_class = "HWB4"
-
- elif structureType in list(range(101,107)):
- if not seismic:
- if state != 6:
- bridge_class = "HWB5"
- else:
- bridge_class = "HWB6"
- else:
- bridge_class = "HWB7"
-
- elif structureType in [205,206]:
- if not seismic:
- bridge_class = "HWB8"
- else:
- bridge_class = "HWB9"
-
- elif structureType in list(range(201,207)):
- if not seismic:
- bridge_class = "HWB10"
- else:
- bridge_class = "HWB11"
-
- elif structureType in list(range(301,307)):
- if not seismic:
- if len_max_span>=20:
- if state != 6:
- bridge_class = "HWB12"
- else:
- bridge_class = "HWB13"
- else:
- if state != 6:
- bridge_class = "HWB24"
- else:
- bridge_class = "HWB25"
- else:
- bridge_class = "HWB14"
-
- elif structureType in list(range(402,411)):
- if not seismic:
- if len_max_span>=20:
- bridge_class = "HWB15"
- elif state != 6:
- bridge_class = "HWB26"
- else:
- bridge_class = "HWB27"
- else:
- bridge_class = "HWB16"
-
- elif structureType in list(range(501,507)):
- if not seismic:
- if state != 6:
- bridge_class = "HWB17"
- else:
- bridge_class = "HWB18"
- else:
- bridge_class = "HWB19"
-
- elif structureType in [605,606]:
- if not seismic:
- bridge_class = "HWB20"
- else:
- bridge_class = "HWB21"
-
- elif structureType in list(range(601,608)):
- if not seismic:
- bridge_class = "HWB22"
- else:
- bridge_class = "HWB23"
-
-
- #TODO: review and add HWB24-27 rules
- #TODO: also double check rules for HWB10-11 and HWB22-23
-
- return bridge_class
-
-
- # original code by JZ
- """
- if not seismic and len_max_span > 150:
- return "HWB1"
- elif seismic and len_max_span > 150:
- return "HWB2"
- elif not seismic and num_span == 1:
- return "HWB3"
- elif seismic and num_span == 1:
- return "HWB4"
- elif not seismic and 101 <= structureType and structureType <= 106 and state != 6:
- return "HWB5"
- elif not seismic and 101 <= structureType and structureType <= 106 and state ==6:
- return "HWB6"
- elif seismic and 101 <= structureType and structureType <= 106:
- return "HWB7"
- elif not seismic and 205 <= structureType and structureType <= 206:
- return "HWB8"
- elif seismic and 205 <= structureType and structureType <= 206:
- return "HWB9"
- elif not seismic and 201 <= structureType and structureType <= 206:
- return "HWB10"
- elif seismic and 201 <= structureType and structureType <= 206:
- return "HWB11"
- elif not seismic and 301 <= structureType and structureType <= 306 and state != 6:
- return "HWB12"
- elif not seismic and 301 <= structureType and structureType <= 306 and state == 6:
- return "HWB13"
- elif seismic and 301 <= structureType and structureType <= 306:
- return "HWB14"
- elif not seismic and 402 <= structureType and structureType <= 410:
- return "HWB15"
- elif seismic and 402 <= structureType and structureType <= 410:
- return "HWB16"
- elif not seismic and 501 <= structureType and structureType <= 506 and state != 6:
- return "HWB17"
- elif not seismic and 501 <= structureType and structureType <= 506 and state == 6:
- return "HWB18"
- elif seismic and 501 <= structureType and structureType <= 506:
- return "HWB19"
- elif not seismic and 605 <= structureType and structureType <= 606:
- return "HWB20"
- elif seismic and 605 <= structureType and structureType <= 606:
- return "HWB21"
- elif not seismic and 601 <= structureType and structureType <= 607:
- return "HWB22"
- elif seismic and 601 <= structureType and structureType <= 607:
- return "HWB23"
-
- elif not seismic and 301 <= structureType and structureType <= 306 and state != 6:
- return "HWB24"
- elif not seismic and 301 <= structureType and structureType <= 306 and state == 6:
- return "HWB25"
- elif not seismic and 402 <= structureType and structureType <= 410 and state != 6:
- return "HWB26"
- elif not seismic and 402 <= structureType and structureType <= 410 and state == 6:
- return "HWB27"
- else:
- return "HWB28"
- """
-
-def convertTunnelToHAZUSclass(AIM):
-
- if ("Bored" in AIM["ConstructType"]) or ("Drilled" in AIM["ConstructType"]):
- return "HTU1"
- elif ("Cut" in AIM["ConstructType"]) or ("Cover" in AIM["ConstructType"]):
- return "HTU2"
- else:
- # Select HTU2 for unclassfied tunnels because it is more conservative.
- return "HTU2"
-
-def convertRoadToHAZUSclass(AIM):
-
- if AIM["RoadType"] in ["Primary", "Secondary"]:
- return "HRD1"
-
- elif AIM["RoadType"]=="Residential":
- return "HRD2"
-
- else:
- # many unclassified roads are urban roads
- return "HRD2"
-
-def convert_story_rise(structureType, stories):
-
-
- if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']:
-
- # These archetypes have no rise information in their IDs
- rise = None
-
- else:
-
- # First, check if we have valid story information
- try:
-
- stories = int(stories)
-
- except:
-
- raise ValueError('Missing "NumberOfStories" information, '
- 'cannot infer rise attribute of archetype')
-
- if structureType == 'RM1':
-
- if stories <= 3:
- rise = "L"
-
- else:
- rise = "M"
-
- elif structureType == 'URM':
- if stories <= 2:
- rise = "L"
-
- else:
- rise = "M"
-
- elif structureType in ['S1', 'S2', 'S4', 'S5', 'C1', 'C2', 'C3', \
- 'PC2', 'RM2']:
- if stories <=3:
- rise = "L"
-
- elif stories <= 7:
- rise = "M"
-
- else:
- rise = "H"
-
- return rise
-
-def auto_populate(AIM):
- """
- Automatically creates a performance model for PGA-based Hazus EQ analysis.
-
- Parameters
- ----------
- AIM: dict
- Asset Information Model - provides features of the asset that can be
- used to infer attributes of the performance model.
-
- Returns
- -------
- GI_ap: dict
- Extended General Information - extends the GI from the input AIM with
- additional inferred features. These features are typically used in
- intermediate steps during the auto-population and are not required
- for the performance assessment. They are returned to allow reviewing
- how these latent variables affect the final results.
- DL_ap: dict
- Damage and Loss parameters - these define the performance model and
- details of the calculation.
- CMP: DataFrame
- Component assignment - Defines the components (in rows) and their
- location, direction, and quantity (in columns).
- """
-
- # extract the General Information
- GI = AIM.get('GeneralInformation', None)
-
- if GI==None:
- #TODO: show an error message
- pass
-
- # initialize the auto-populated GI
- GI_ap = GI.copy()
-
- assetType = AIM["assetType"]
- ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"]
-
- if assetType=="Buildings":
-
- # get the building parameters
- bt = GI['StructureType'] #building type
-
- # get the design level
- dl = GI.get('DesignLevel', None)
-
- if dl == None:
- # If there is no DesignLevel provided, we assume that the YearBuilt is
- # available
- year_built = GI['YearBuilt']
-
- if 'W1' in bt:
- DesignL = ap_DesignLevel_W1
- else:
- DesignL = ap_DesignLevel
-
- for year in sorted(DesignL.keys()):
- if year_built <= year:
- dl = DesignL[year]
- break
-
- GI_ap['DesignLevel'] = dl
-
- # get the number of stories / height
- stories = GI.get('NumberOfStories', None)
-
- # We assume that the structure type does not include height information
- # and we append it here based on the number of story information
- rise = convert_story_rise(bt, stories)
-
- if rise is not None:
- LF = f'LF.{bt}.{rise}.{dl}'
- GI_ap['BuildingRise'] = rise
- else:
- LF = f'LF.{bt}.{dl}'
-
-
- CMP = pd.DataFrame(
- {f'{LF}': [ 'ea', 1, 1, 1, 'N/A']},
- index = [ 'Units','Location','Direction','Theta_0','Family']
- ).T
-
- # if needed, add components to simulate damage from ground failure
- if ground_failure:
-
- foundation_type = 'S'
-
- FG_GF_H = f'GF.H.{foundation_type}'
- FG_GF_V = f'GF.V.{foundation_type}'
-
- CMP_GF = pd.DataFrame(
- {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'],
- f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']},
- index = [ 'Units','Location','Direction','Theta_0','Family']
- ).T
-
- CMP = pd.concat([CMP, CMP_GF], axis=0)
-
- # set the number of stories to 1
- # there is only one component in a building-level resolution
- stories = 1
-
- # get the occupancy class
- if GI['OccupancyClass'] in ap_Occupancy.keys():
- ot = ap_Occupancy[GI['OccupancyClass']]
- else:
- ot = GI['OccupancyClass']
-
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Buildings",
- "NumberOfStories": f"{stories}",
- "OccupancyType": f"{ot}",
- "PlanArea": "1"
- },
- "Damage": {
- "DamageProcess": "Hazus Earthquake"
- },
- "Demands": {
- },
- "Losses": {
- "BldgRepair": {
- "ConsequenceDatabase": "Hazus Earthquake - Buildings",
- "MapApproach": "Automatic"
- }
- }
- }
-
- elif assetType == "TransportationNetwork":
-
- inf_type = GI["assetSubtype"]
-
- if inf_type == "HwyBridge":
-
- # get the bridge class
- bt = convertBridgeToHAZUSclass(GI)
- GI_ap['BridgeHazusClass'] = bt
-
- CMP = pd.DataFrame(
- {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'],
- f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']},
- index = [ 'Units','Location','Direction','Theta_0','Family']
- ).T
-
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "BridgeHazusClass": bt,
- "PlanArea": "1"
- },
- "Damage": {
- "DamageProcess": "Hazus Earthquake"
- },
- "Demands": {
- },
- "Losses": {
- "BldgRepair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic"
- }
- }
- }
-
- elif inf_type == "HwyTunnel":
-
- # get the tunnel class
- tt = convertTunnelToHAZUSclass(GI)
- GI_ap['TunnelHazusClass'] = tt
-
- CMP = pd.DataFrame(
- {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'],
- f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']},
- index = [ 'Units','Location','Direction','Theta_0','Family']
- ).T
-
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "TunnelHazusClass": tt,
- "PlanArea": "1"
- },
- "Damage": {
- "DamageProcess": "Hazus Earthquake"
- },
- "Demands": {
- },
- "Losses": {
- "BldgRepair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic"
- }
- }
- }
- elif inf_type == "Roadway":
-
- # get the road class
- rt = convertRoadToHAZUSclass(GI)
- GI_ap['RoadHazusClass'] = rt
-
- CMP = pd.DataFrame(
- {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']},
- index = [ 'Units','Location','Direction','Theta_0','Family']
- ).T
-
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "RoadHazusClass": rt,
- "PlanArea": "1"
- },
- "Damage": {
- "DamageProcess": "Hazus Earthquake"
- },
- "Demands": {
- },
- "Losses": {
- "BldgRepair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic"
- }
- }
- }
- else:
- print("subtype not supported in HWY")
- else:
- print(f"AssetType: {assetType} is not supported in Hazus Earthquake IM DL method")
-
- return GI_ap, DL_ap, CMP
\ No newline at end of file
+
+ # Use a catch-all, other class by default
+ bridge_class = "HWB28"
+
+ if len_max_span > 150:
+ if not seismic:
+ bridge_class = "HWB1"
+ else:
+ bridge_class = "HWB2"
+
+ elif num_span == 1:
+ if not seismic:
+ bridge_class = "HWB3"
+ else:
+ bridge_class = "HWB4"
+
+ elif structureType in list(range(101,107)):
+ if not seismic:
+ if state != 6:
+ bridge_class = "HWB5"
+ else:
+ bridge_class = "HWB6"
+ else:
+ bridge_class = "HWB7"
+
+ elif structureType in [205,206]:
+ if not seismic:
+ bridge_class = "HWB8"
+ else:
+ bridge_class = "HWB9"
+
+ elif structureType in list(range(201,207)):
+ if not seismic:
+ bridge_class = "HWB10"
+ else:
+ bridge_class = "HWB11"
+
+ elif structureType in list(range(301,307)):
+ if not seismic:
+ if len_max_span>=20:
+ if state != 6:
+ bridge_class = "HWB12"
+ else:
+ bridge_class = "HWB13"
+ else:
+ if state != 6:
+ bridge_class = "HWB24"
+ else:
+ bridge_class = "HWB25"
+ else:
+ bridge_class = "HWB14"
+
+ elif structureType in list(range(402,411)):
+ if not seismic:
+ if len_max_span>=20:
+ bridge_class = "HWB15"
+ elif state != 6:
+ bridge_class = "HWB26"
+ else:
+ bridge_class = "HWB27"
+ else:
+ bridge_class = "HWB16"
+
+ elif structureType in list(range(501,507)):
+ if not seismic:
+ if state != 6:
+ bridge_class = "HWB17"
+ else:
+ bridge_class = "HWB18"
+ else:
+ bridge_class = "HWB19"
+
+ elif structureType in [605,606]:
+ if not seismic:
+ bridge_class = "HWB20"
+ else:
+ bridge_class = "HWB21"
+
+ elif structureType in list(range(601,608)):
+ if not seismic:
+ bridge_class = "HWB22"
+ else:
+ bridge_class = "HWB23"
+
+
+ #TODO: review and add HWB24-27 rules
+ #TODO: also double check rules for HWB10-11 and HWB22-23
+
+ return bridge_class
+
+
+ # original code by JZ
+ """
+ if not seismic and len_max_span > 150:
+ return "HWB1"
+ elif seismic and len_max_span > 150:
+ return "HWB2"
+ elif not seismic and num_span == 1:
+ return "HWB3"
+ elif seismic and num_span == 1:
+ return "HWB4"
+ elif not seismic and 101 <= structureType and structureType <= 106 and state != 6:
+ return "HWB5"
+ elif not seismic and 101 <= structureType and structureType <= 106 and state ==6:
+ return "HWB6"
+ elif seismic and 101 <= structureType and structureType <= 106:
+ return "HWB7"
+ elif not seismic and 205 <= structureType and structureType <= 206:
+ return "HWB8"
+ elif seismic and 205 <= structureType and structureType <= 206:
+ return "HWB9"
+ elif not seismic and 201 <= structureType and structureType <= 206:
+ return "HWB10"
+ elif seismic and 201 <= structureType and structureType <= 206:
+ return "HWB11"
+ elif not seismic and 301 <= structureType and structureType <= 306 and state != 6:
+ return "HWB12"
+ elif not seismic and 301 <= structureType and structureType <= 306 and state == 6:
+ return "HWB13"
+ elif seismic and 301 <= structureType and structureType <= 306:
+ return "HWB14"
+ elif not seismic and 402 <= structureType and structureType <= 410:
+ return "HWB15"
+ elif seismic and 402 <= structureType and structureType <= 410:
+ return "HWB16"
+ elif not seismic and 501 <= structureType and structureType <= 506 and state != 6:
+ return "HWB17"
+ elif not seismic and 501 <= structureType and structureType <= 506 and state == 6:
+ return "HWB18"
+ elif seismic and 501 <= structureType and structureType <= 506:
+ return "HWB19"
+ elif not seismic and 605 <= structureType and structureType <= 606:
+ return "HWB20"
+ elif seismic and 605 <= structureType and structureType <= 606:
+ return "HWB21"
+ elif not seismic and 601 <= structureType and structureType <= 607:
+ return "HWB22"
+ elif seismic and 601 <= structureType and structureType <= 607:
+ return "HWB23"
+
+ elif not seismic and 301 <= structureType and structureType <= 306 and state != 6:
+ return "HWB24"
+ elif not seismic and 301 <= structureType and structureType <= 306 and state == 6:
+ return "HWB25"
+ elif not seismic and 402 <= structureType and structureType <= 410 and state != 6:
+ return "HWB26"
+ elif not seismic and 402 <= structureType and structureType <= 410 and state == 6:
+ return "HWB27"
+ else:
+ return "HWB28"
+ """
+
+def convertTunnelToHAZUSclass(AIM):
+
+ if ("Bored" in AIM["ConstructType"]) or ("Drilled" in AIM["ConstructType"]):
+ return "HTU1"
+ elif ("Cut" in AIM["ConstructType"]) or ("Cover" in AIM["ConstructType"]):
+ return "HTU2"
+ else:
+        # Select HTU2 for unclassified tunnels because it is more conservative.
+ return "HTU2"
+
+def convertRoadToHAZUSclass(AIM):
+
+ if AIM["RoadType"] in ["Primary", "Secondary"]:
+ return "HRD1"
+
+ elif AIM["RoadType"]=="Residential":
+ return "HRD2"
+
+ else:
+ # many unclassified roads are urban roads
+ return "HRD2"
+
+def convert_story_rise(structureType, stories):
+
+
+ if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']:
+
+ # These archetypes have no rise information in their IDs
+ rise = None
+
+ else:
+
+ # First, check if we have valid story information
+ try:
+
+ stories = int(stories)
+
+ except:
+
+ raise ValueError('Missing "NumberOfStories" information, '
+ 'cannot infer rise attribute of archetype')
+
+ if structureType == 'RM1':
+
+ if stories <= 3:
+ rise = "L"
+
+ else:
+ rise = "M"
+
+ elif structureType == 'URM':
+ if stories <= 2:
+ rise = "L"
+
+ else:
+ rise = "M"
+
+ elif structureType in ['S1', 'S2', 'S4', 'S5', 'C1', 'C2', 'C3', \
+ 'PC2', 'RM2']:
+ if stories <=3:
+ rise = "L"
+
+ elif stories <= 7:
+ rise = "M"
+
+ else:
+ rise = "H"
+
+ return rise
+
+def auto_populate(AIM):
+ """
+ Automatically creates a performance model for PGA-based Hazus EQ analysis.
+
+ Parameters
+ ----------
+ AIM: dict
+ Asset Information Model - provides features of the asset that can be
+ used to infer attributes of the performance model.
+
+ Returns
+ -------
+ GI_ap: dict
+ Extended General Information - extends the GI from the input AIM with
+ additional inferred features. These features are typically used in
+ intermediate steps during the auto-population and are not required
+ for the performance assessment. They are returned to allow reviewing
+ how these latent variables affect the final results.
+ DL_ap: dict
+ Damage and Loss parameters - these define the performance model and
+ details of the calculation.
+ CMP: DataFrame
+ Component assignment - Defines the components (in rows) and their
+ location, direction, and quantity (in columns).
+ """
+
+ # extract the General Information
+ GI = AIM.get('GeneralInformation', None)
+
+ if GI==None:
+ #TODO: show an error message
+ pass
+
+ # initialize the auto-populated GI
+ GI_ap = GI.copy()
+
+ assetType = AIM["assetType"]
+ ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"]
+
+ if assetType=="Buildings":
+
+ # get the building parameters
+ bt = GI['StructureType'] #building type
+
+ # get the design level
+ dl = GI.get('DesignLevel', None)
+
+ if dl == None:
+ # If there is no DesignLevel provided, we assume that the YearBuilt is
+ # available
+ year_built = GI['YearBuilt']
+
+ if 'W1' in bt:
+ DesignL = ap_DesignLevel_W1
+ else:
+ DesignL = ap_DesignLevel
+
+ for year in sorted(DesignL.keys()):
+ if year_built <= year:
+ dl = DesignL[year]
+ break
+
+ GI_ap['DesignLevel'] = dl
+
+ # get the number of stories / height
+ stories = GI.get('NumberOfStories', None)
+
+ # We assume that the structure type does not include height information
+ # and we append it here based on the number of story information
+ rise = convert_story_rise(bt, stories)
+
+ if rise is not None:
+ LF = f'LF.{bt}.{rise}.{dl}'
+ GI_ap['BuildingRise'] = rise
+ else:
+ LF = f'LF.{bt}.{dl}'
+
+
+ CMP = pd.DataFrame(
+ {f'{LF}': [ 'ea', 1, 1, 1, 'N/A']},
+ index = [ 'Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ # if needed, add components to simulate damage from ground failure
+ if ground_failure:
+
+ foundation_type = 'S'
+
+ FG_GF_H = f'GF.H.{foundation_type}'
+ FG_GF_V = f'GF.V.{foundation_type}'
+
+ CMP_GF = pd.DataFrame(
+ {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'],
+ f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']},
+ index = [ 'Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ CMP = pd.concat([CMP, CMP_GF], axis=0)
+
+ # set the number of stories to 1
+ # there is only one component in a building-level resolution
+ stories = 1
+
+ # get the occupancy class
+ if GI['OccupancyClass'] in ap_Occupancy.keys():
+ ot = ap_Occupancy[GI['OccupancyClass']]
+ else:
+ ot = GI['OccupancyClass']
+
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Buildings",
+ "NumberOfStories": f"{stories}",
+ "OccupancyType": f"{ot}",
+ "PlanArea": "1"
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake"
+ },
+ "Demands": {
+ },
+ "Losses": {
+ "BldgRepair": {
+ "ConsequenceDatabase": "Hazus Earthquake - Buildings",
+ "MapApproach": "Automatic"
+ }
+ }
+ }
+
+ elif assetType == "TransportationNetwork":
+
+ inf_type = GI["assetSubtype"]
+
+ if inf_type == "HwyBridge":
+
+ # get the bridge class
+ bt = convertBridgeToHAZUSclass(GI)
+ GI_ap['BridgeHazusClass'] = bt
+
+ CMP = pd.DataFrame(
+ {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'],
+ f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']},
+ index = [ 'Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Transportation",
+ "BridgeHazusClass": bt,
+ "PlanArea": "1"
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake"
+ },
+ "Demands": {
+ },
+ "Losses": {
+ "BldgRepair": {
+ "ConsequenceDatabase": "Hazus Earthquake - Transportation",
+ "MapApproach": "Automatic"
+ }
+ }
+ }
+
+ elif inf_type == "HwyTunnel":
+
+ # get the tunnel class
+ tt = convertTunnelToHAZUSclass(GI)
+ GI_ap['TunnelHazusClass'] = tt
+
+ CMP = pd.DataFrame(
+ {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'],
+ f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']},
+ index = [ 'Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Transportation",
+ "TunnelHazusClass": tt,
+ "PlanArea": "1"
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake"
+ },
+ "Demands": {
+ },
+ "Losses": {
+ "BldgRepair": {
+ "ConsequenceDatabase": "Hazus Earthquake - Transportation",
+ "MapApproach": "Automatic"
+ }
+ }
+ }
+ elif inf_type == "Roadway":
+
+ # get the road class
+ rt = convertRoadToHAZUSclass(GI)
+ GI_ap['RoadHazusClass'] = rt
+
+ CMP = pd.DataFrame(
+ {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']},
+ index = [ 'Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Transportation",
+ "RoadHazusClass": rt,
+ "PlanArea": "1"
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake"
+ },
+ "Demands": {
+ },
+ "Losses": {
+ "BldgRepair": {
+ "ConsequenceDatabase": "Hazus Earthquake - Transportation",
+ "MapApproach": "Automatic"
+ }
+ }
+ }
+ else:
+ print("subtype not supported in HWY")
+
+ elif assetType == "WaterDistributionNetwork":
+
+ pipe_material_map ={"CI": "B", "AC": "B", "RCC": "B",
+ "DI": "D", "PVC": "D",
+ "DS": "B",
+ "BS": "D",}
+
+ #GI = AIM.get("GeneralInformation", None)
+ #if GI==None:
+
+
+ # initialize the auto-populated GI
+ wdn_element_type = GI_ap.get("type", "MISSING")
+ asset_name = GI_ap.get("AIM_id", None)
+
+
+ if wdn_element_type == "Pipe":
+ pipe_construction_year = GI_ap.get("year", None)
+ pipe_diameter = GI_ap.get("Diam", None)
+            # diameter value is a fundamental part of hydraulic performance assessment
+ if pipe_diameter == None:
+ raise ValueError(f"pipe diamater in asset type {assetType}, \
+ asset id \"{asset_name}\" has no diameter \
+ value.")
+
+ pipe_length = GI_ap.get("Len", None)
+ #length value is a fundamental part of hydraulic performance assessment
+ if pipe_diameter == None:
+ raise ValueError(f"pipe length in asset type {assetType}, \
+ asset id \"{asset_name}\" has no diameter \
+ value.")
+
+ pipe_material = GI_ap.get("material", None)
+
+            # pipe material may be unavailable or named "missing"; in both cases, pipe flexibility will be set to "missing"
+
+            """
+            The assumed logic (ruleset) is that if the material is missing and
+            the pipe is smaller than or equal to 20 inches, the material is
+            Cast Iron (CI); otherwise the pipe material is steel.
+            (NOTE(review): the code below assigns CI when diameter > 20 in —
+            confirm which direction is intended.)
+            If the material is steel (ST), the construction year defines the
+            flexibility status per HAZUS: built in 1935 or after means the
+            pipe is Ductile Steel (DS); otherwise it is Brittle Steel (BS).
+            If the construction year is missing for a steel pipe, we
+            conservatively assume that the pipe is brittle (i.e., BS).
+            """
+ if pipe_material == None:
+ if pipe_diameter > 20 * 0.0254: #20 inches in meter
+ print(f"Asset {asset_name} is missing material. Material is\
+ assumed to be Cast Iron")
+ pipe_material = "CI"
+ else:
+ print(f"Asset {asset_name} is missing material. Material is "
+ f"assumed to be Steel (ST)")
+ pipe_material = "ST"
+
+ if pipe_material == "ST":
+ if pipe_construction_year != None and pipe_construction_year >= 1935:
+ print(f"Asset {asset_name} has material of \"ST\" is assumed to be\
+ Ductile Steel")
+ pipe_material = "DS"
+ else:
+ print(f'Asset {asset_name} has material of "ST" is assumed to be '
+ f'Brittle Steel')
+ pipe_material = "BS"
+
+ pipe_flexibility = pipe_material_map.get(pipe_material, "missing")
+
+ GI_ap["material flexibility"] = pipe_flexibility
+ GI_ap["material"] = pipe_material
+
+
+            # Pipes are broken into 20ft segments (rounding up) and
+            # each segment is represented by an individual entry in
+            # the performance model, `CMP`. The damage capacity of each
+            # segment is assumed to be independent and driven by the
+            # same EDP. We therefore replicate the EDP associated with
+            # the pipe to the various locations assigned to the
+            # segments.
+
+ # Determine number of segments
+ with open(
+ os.path.join(
+ os.path.dirname(pelicun.__file__), 'settings/default_units.json'
+ ),
+ 'r',
+ encoding='utf-8',
+ ) as f:
+ units = json.load(f)
+ pipe_length_unit = GI_ap['units']['length']
+ pipe_length_unit_factor = units['length'][pipe_length_unit]
+ pipe_length_in_base_unit = pipe_length * pipe_length_unit_factor
+ reference_length_in_base_unit = 20.00 * units['length']['ft']
+ if pipe_length_in_base_unit % reference_length_in_base_unit < 1e-2:
+ # If the lengths are equal, then that's one segment, not two.
+ num_segments = int(pipe_length_in_base_unit / reference_length_in_base_unit)
+ else:
+ # In all other cases, round up.
+ num_segments = int(pipe_length_in_base_unit / reference_length_in_base_unit) + 1
+ if num_segments > 1:
+ location_string = f'1--{num_segments}'
+ else:
+ location_string = '1'
+
+ # Define performance model
+ CMP = pd.DataFrame(
+ {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'],
+ f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'],
+ f'aggregate': ['ea', location_string, '0', 1, 'N/A']},
+ index = ['Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ # Set up the demand cloning configuration for the pipe
+ # segments, if required.
+ demand_config = {}
+ if num_segments > 1:
+ # determine the EDP tags available for cloning
+ response_data = pelicun.file_io.load_data('response.csv', None)
+ num_header_entries = len(response_data.columns.names)
+ # if 4, assume a hazard level tag is present and remove it
+ if num_header_entries == 4:
+ response_data.columns = pd.MultiIndex.from_tuples(
+ [x[1::] for x in response_data.columns]
+ )
+ demand_cloning_config = {}
+ for edp in response_data.columns:
+ tag, location, direction = edp
+
+ demand_cloning_config['-'.join(edp)] = [
+ f'{tag}-{x}-{direction}'
+ for x in [f'{i+1}' for i in range(num_segments)]
+ ]
+ demand_config = {'DemandCloning': demand_cloning_config}
+
+ # Create damage process
+ dmg_process = {
+ f"1_PWP.{pipe_flexibility}.GS": {"DS1": "aggregate_DS1"},
+ f"2_PWP.{pipe_flexibility}.GF": {"DS1": "aggregate_DS1"},
+ f"3_PWP.{pipe_flexibility}.GS": {"DS2": "aggregate_DS2"},
+ f"4_PWP.{pipe_flexibility}.GF": {"DS2": "aggregate_DS2"},
+ }
+ dmg_process_filename = 'dmg_process.json'
+ with open(dmg_process_filename, 'w', encoding='utf-8') as f:
+ json.dump(dmg_process, f, indent=2)
+
+ # Define the auto-populated config
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Water",
+ "Material Flexibility": pipe_flexibility,
+ "PlanArea": "1" # Sina: does not make sense for water. Kept it here since itw as also kept here for Transportation
+ },
+ "Damage": {
+ "DamageProcess": "User Defined",
+ "DamageProcessFilePath": "dmg_process.json"
+ },
+ "Demands": demand_config
+ }
+
+ elif wdn_element_type == "Tank":
+
+ tank_cmp_lines = {
+ ("OG", "C", 1):{'PST.G.C.A.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ ("OG", "C", 0):{'PST.G.C.U.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ ("OG", "S", 1):{'PST.G.S.A.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ ("OG", "S", 0):{'PST.G.S.U.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ # Anchored status and Wood are not defined for On Ground tanks
+ ("OG", "W", 0):{'PST.G.W.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ # Anchored status and Steel are not defined for Above Ground tanks
+ ("AG", "S", 0):{'PST.A.S.GS': [ 'ea', 1, 1, 1, 'N/A' ]},
+ # Anchored status and Concrete are not defined for Buried tanks.
+ ("B", "C", 0):{'PST.B.C.GF': [ 'ea', 1, 1, 1, 'N/A' ]}
+ }
+
+ """
+ The default values are assumed: material = Concrete (C),
+ location = On Ground (OG), and Anchored = 1
+ """
+ tank_material = GI_ap.get("material", "C")
+ tank_location = GI_ap.get("location", "OG")
+ tank_anchored = GI_ap.get("anchored", int(1) )
+
+ tank_material_allowable = {"C", "S"}
+ if tank_material not in tank_material_allowable:
+ raise ValueError(f"Tank's material = \"{tank_material}\" is \
+ not allowable in tank {asset_name}. The \
+ material must be either C for concrete or S \
+ for steel.")
+
+ tank_location_allowable = {"AG", "OG", "B"}
+ if tank_location not in tank_location_allowable:
+ raise ValueError(f"Tank's location = \"{tank_location}\" is \
+ not allowable in tank {asset_name}. The \
+ location must be either \"AG\" for Above \
+ ground, \"OG\" for On Ground or \"BG\" for \
+ Bellow Ground (burried) Tanks.")
+
+ tank_anchored_allowable = {int(0), int(1)}
+ if tank_anchored not in tank_anchored_allowable:
+ raise ValueError(f"Tank's anchored status = \"{tank_location}\
+ \" is not allowable in tank {asset_name}. \
+ The anchored status must be either integer\
+ value 0 for unachored, or 1 for anchored")
+
+ if tank_location == "AG" and tank_material == "C":
+ print(f"The tank {asset_name} is Above Ground (i.e., AG), but \
+ the material type is Concrete (\"C\"). Tank type \"C\" is not \
+ defiend for AG tanks. The tank is assumed to be Steel (\"S\")")
+ tank_material = "S"
+
+ if tank_location == "AG" and tank_material == "W":
+ print(f"The tank {asset_name} is Above Ground (i.e., AG), but \
+ the material type is Wood (\"W\"). Tank type \"W\" is not \
+ defiend for AG tanks. The tank is assumed to be Steel (\"S\")")
+ tank_material = "S"
+
+
+ if tank_location == "B" and tank_material == "S":
+ print(f"The tank {asset_name} is burried (i.e., B), but the\
+ material type is Steel (\"S\"). Tank type \"S\" is not defiend for\
+ B tanks. The tank is assumed to be Concrete (\"C\")")
+ tank_material = "C"
+
+ if tank_location == "B" and tank_material == "W":
+ print(f"The tank {asset_name} is burried (i.e., B), but the\
+ material type is Wood (\"W\"). Tank type \"W\" is not defiend for\
+ B tanks. The tank is assumed to be Concrete (\"C\")")
+ tank_material = "C"
+
+ if tank_anchored == 1:
+ # Since anchored status does not matter, there is no need to
+ # print a warning
+ tank_anchored = 0
+
+ cur_tank_cmp_line = tank_cmp_lines[(tank_location, tank_material, tank_anchored)]
+
+ CMP = pd.DataFrame(
+ cur_tank_cmp_line,
+ index = ['Units','Location','Direction','Theta_0','Family']
+ ).T
+
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Water",
+ "Material": tank_material,
+ "Location": tank_location,
+ "Anchored": tank_anchored,
+ "PlanArea": "1" # Sina: does not make sense for water. Kept it here since itw as also kept here for Transportation
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake"
+ },
+ "Demands": {
+ }
+ }
+
+ else:
+ print(f"Water Distribution network element type {wdn_element_type} is not supported in Hazus Earthquake IM DL method")
+ DL_ap = None
+ CMP = None
+
+ else:
+ print(f"AssetType: {assetType} is not supported in Hazus Earthquake IM DL method")
+
+ return GI_ap, DL_ap, CMP
diff --git a/pelicun/tests/data/file_io/test_parse_units/additional_units_a.json b/pelicun/tests/data/base/test_parse_units/additional_units_a.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/additional_units_a.json
rename to pelicun/tests/data/base/test_parse_units/additional_units_a.json
diff --git a/pelicun/tests/data/file_io/test_parse_units/duplicate.json b/pelicun/tests/data/base/test_parse_units/duplicate.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/duplicate.json
rename to pelicun/tests/data/base/test_parse_units/duplicate.json
diff --git a/pelicun/tests/data/file_io/test_parse_units/duplicate2.json b/pelicun/tests/data/base/test_parse_units/duplicate2.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/duplicate2.json
rename to pelicun/tests/data/base/test_parse_units/duplicate2.json
diff --git a/pelicun/tests/data/file_io/test_parse_units/invalid.json b/pelicun/tests/data/base/test_parse_units/invalid.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/invalid.json
rename to pelicun/tests/data/base/test_parse_units/invalid.json
diff --git a/pelicun/tests/data/file_io/test_parse_units/not_dict.json b/pelicun/tests/data/base/test_parse_units/not_dict.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/not_dict.json
rename to pelicun/tests/data/base/test_parse_units/not_dict.json
diff --git a/pelicun/tests/data/file_io/test_parse_units/not_float.json b/pelicun/tests/data/base/test_parse_units/not_float.json
similarity index 100%
rename from pelicun/tests/data/file_io/test_parse_units/not_float.json
rename to pelicun/tests/data/base/test_parse_units/not_float.json
diff --git a/pelicun/tests/test_base.py b/pelicun/tests/test_base.py
index 8a5fc8a9c..b1b5b8524 100644
--- a/pelicun/tests/test_base.py
+++ b/pelicun/tests/test_base.py
@@ -601,3 +601,166 @@ def test_process_loc():
def test_run_input_specs():
assert os.path.basename(base.pelicun_path) == 'pelicun'
+
+
+def test_dict_raise_on_duplicates():
+ res = base.dict_raise_on_duplicates([('A', '1'), ('B', '2')])
+ assert res == {'A': '1', 'B': '2'}
+ with pytest.raises(ValueError):
+ base.dict_raise_on_duplicates([('A', '1'), ('A', '2')])
+
+
+def test_parse_units():
+ # Test the default units are parsed correctly
+ units = base.parse_units()
+ assert isinstance(units, dict)
+ expect = {
+ "sec": 1.0,
+ "minute": 60.0,
+ "hour": 3600.0,
+ "day": 86400.0,
+ "m": 1.0,
+ "mm": 0.001,
+ "cm": 0.01,
+ "km": 1000.0,
+ "in": 0.0254,
+ "inch": 0.0254,
+ "ft": 0.3048,
+ "mile": 1609.344,
+ "m2": 1.0,
+ "mm2": 1e-06,
+ "cm2": 0.0001,
+ "km2": 1000000.0,
+ "in2": 0.00064516,
+ "inch2": 0.00064516,
+ "ft2": 0.09290304,
+ "mile2": 2589988.110336,
+ "m3": 1.0,
+ "in3": 1.6387064e-05,
+ "inch3": 1.6387064e-05,
+ "ft3": 0.028316846592,
+ "cmps": 0.01,
+ "mps": 1.0,
+ "mph": 0.44704,
+ "inps": 0.0254,
+ "inchps": 0.0254,
+ "ftps": 0.3048,
+ "mps2": 1.0,
+ "inps2": 0.0254,
+ "inchps2": 0.0254,
+ "ftps2": 0.3048,
+ "g": 9.80665,
+ "kg": 1.0,
+ "ton": 1000.0,
+ "lb": 0.453592,
+ "N": 1.0,
+ "kN": 1000.0,
+ "lbf": 4.4482179868,
+ "kip": 4448.2179868,
+ "kips": 4448.2179868,
+ "Pa": 1.0,
+ "kPa": 1000.0,
+ "MPa": 1000000.0,
+ "GPa": 1000000000.0,
+ "psi": 6894.751669043338,
+ "ksi": 6894751.669043338,
+ "Mpsi": 6894751669.043338,
+ "A": 1.0,
+ "V": 1.0,
+ "kV": 1000.0,
+ "ea": 1.0,
+ "unitless": 1.0,
+ "rad": 1.0,
+ "C": 1.0,
+ "USD_2011": 1.0,
+ "USD": 1.0,
+ "loss_ratio": 1.0,
+ "worker_day": 1.0,
+ "EA": 1.0,
+ "SF": 0.09290304,
+ "LF": 0.3048,
+ "TN": 1000.0,
+ "AP": 1.0,
+ "CF": 0.0004719474432,
+ "KV": 1000.0,
+ "J": 1.0,
+ "MJ": 1000000.0,
+ "test_two": 2.00,
+ "test_three": 3.00,
+ }
+ for thing, value in units.items():
+ assert thing in expect
+ assert value == expect[thing]
+
+ # Test that additional units are parsed correctly
+ additional_units_file = (
+ 'pelicun/tests/data/base/test_parse_units/additional_units_a.json'
+ )
+ units = base.parse_units(additional_units_file)
+ assert isinstance(units, dict)
+ assert 'year' in units
+ assert units['year'] == 1.00
+
+ # Test that an exception is raised if the additional units file is not found
+ with pytest.raises(FileNotFoundError):
+ units = base.parse_units('invalid/file/path.json')
+
+ # Test that an exception is raised if the additional units file is
+ # not a valid JSON file
+ invalid_json_file = 'pelicun/tests/data/base/test_parse_units/invalid.json'
+ with pytest.raises(Exception):
+ units = base.parse_units(invalid_json_file)
+
+ # Test that an exception is raised if a unit is defined twice in
+ # the additional units file
+ duplicate_units_file = 'pelicun/tests/data/base/test_parse_units/duplicate2.json'
+ with pytest.raises(ValueError):
+ units = base.parse_units(duplicate_units_file)
+
+ # Test that an exception is raised if a unit conversion factor is not a float
+ invalid_units_file = 'pelicun/tests/data/base/test_parse_units/not_float.json'
+ with pytest.raises(TypeError):
+ units = base.parse_units(invalid_units_file)
+
+ # Test that we get an error if some first-level key does not point
+ # to a dictionary
+ invalid_units_file = 'pelicun/tests/data/base/test_parse_units/not_dict.json'
+ with pytest.raises(ValueError):
+ units = base.parse_units(invalid_units_file)
+
+
+def test_unit_conversion():
+ # Test scalar conversion from feet to meters
+ assert base.convert_units(1.00, 'ft', 'm') == 0.3048
+
+ # Test list conversion from feet to meters
+ feet_values = [1.0, 2.0, 3.0]
+ meter_values = [0.3048, 0.6096, 0.9144]
+ np.testing.assert_array_almost_equal(
+ base.convert_units(feet_values, 'ft', 'm'), meter_values
+ )
+
+ # Test numpy array conversion from feet to meters
+ feet_values = np.array([1.0, 2.0, 3.0])
+ meter_values = np.array([0.3048, 0.6096, 0.9144])
+ np.testing.assert_array_almost_equal(
+ base.convert_units(feet_values, 'ft', 'm'), meter_values
+ )
+
+ # Test conversion with explicit category
+ assert base.convert_units(1.00, 'ft', 'm', category='length') == 0.3048
+
+ # Test error handling for invalid input type
+ with pytest.raises(TypeError) as excinfo:
+ base.convert_units("one", 'ft', 'm')
+ assert str(excinfo.value) == 'Invalid input type for `values`'
+
+ # Test error handling for unknown unit
+ with pytest.raises(ValueError) as excinfo:
+ base.convert_units(1.00, 'xyz', 'm')
+ assert str(excinfo.value) == 'Unknown unit `xyz`'
+
+ # Test error handling for mismatched category
+ with pytest.raises(ValueError) as excinfo:
+ base.convert_units(1.00, 'ft', 'm', category='volume')
+ assert str(excinfo.value) == 'Unknown unit: `ft`'
diff --git a/pelicun/tests/test_file_io.py b/pelicun/tests/test_file_io.py
index 40617ec68..60d7478c4 100644
--- a/pelicun/tests/test_file_io.py
+++ b/pelicun/tests/test_file_io.py
@@ -55,134 +55,6 @@
# The tests maintain the order of definitions of the `file_io.py` file.
-def test_dict_raise_on_duplicates():
- res = file_io.dict_raise_on_duplicates([('A', '1'), ('B', '2')])
- assert res == {'A': '1', 'B': '2'}
- with pytest.raises(ValueError):
- file_io.dict_raise_on_duplicates([('A', '1'), ('A', '2')])
-
-
-def test_parse_units():
- # Test the default units are parsed correctly
- units = file_io.parse_units()
- assert isinstance(units, dict)
- expect = {
- "sec": 1.0,
- "minute": 60.0,
- "hour": 3600.0,
- "day": 86400.0,
- "m": 1.0,
- "mm": 0.001,
- "cm": 0.01,
- "km": 1000.0,
- "in": 0.0254,
- "inch": 0.0254,
- "ft": 0.3048,
- "mile": 1609.344,
- "m2": 1.0,
- "mm2": 1e-06,
- "cm2": 0.0001,
- "km2": 1000000.0,
- "in2": 0.00064516,
- "inch2": 0.00064516,
- "ft2": 0.09290304,
- "mile2": 2589988.110336,
- "m3": 1.0,
- "in3": 1.6387064e-05,
- "inch3": 1.6387064e-05,
- "ft3": 0.028316846592,
- "cmps": 0.01,
- "mps": 1.0,
- "mph": 0.44704,
- "inps": 0.0254,
- "inchps": 0.0254,
- "ftps": 0.3048,
- "mps2": 1.0,
- "inps2": 0.0254,
- "inchps2": 0.0254,
- "ftps2": 0.3048,
- "g": 9.80665,
- "kg": 1.0,
- "ton": 1000.0,
- "lb": 0.453592,
- "N": 1.0,
- "kN": 1000.0,
- "lbf": 4.4482179868,
- "kip": 4448.2179868,
- "kips": 4448.2179868,
- "Pa": 1.0,
- "kPa": 1000.0,
- "MPa": 1000000.0,
- "GPa": 1000000000.0,
- "psi": 6894.751669043338,
- "ksi": 6894751.669043338,
- "Mpsi": 6894751669.043338,
- "A": 1.0,
- "V": 1.0,
- "kV": 1000.0,
- "ea": 1.0,
- "unitless": 1.0,
- "rad": 1.0,
- "C": 1.0,
- "USD_2011": 1.0,
- "USD": 1.0,
- "loss_ratio": 1.0,
- "worker_day": 1.0,
- "EA": 1.0,
- "SF": 0.09290304,
- "LF": 0.3048,
- "TN": 1000.0,
- "AP": 1.0,
- "CF": 0.0004719474432,
- "KV": 1000.0,
- "J": 1.0,
- "MJ": 1000000.0,
- "test_two": 2.00,
- "test_three": 3.00,
- }
- for thing, value in units.items():
- assert thing in expect
- assert value == expect[thing]
-
- # Test that additional units are parsed correctly
- additional_units_file = (
- 'pelicun/tests/data/file_io/test_parse_units/additional_units_a.json'
- )
- units = file_io.parse_units(additional_units_file)
- assert isinstance(units, dict)
- assert 'year' in units
- assert units['year'] == 1.00
-
- # Test that an exception is raised if the additional units file is not found
- with pytest.raises(FileNotFoundError):
- units = file_io.parse_units('invalid/file/path.json')
-
- # Test that an exception is raised if the additional units file is
- # not a valid JSON file
- invalid_json_file = 'pelicun/tests/data/file_io/test_parse_units/invalid.json'
- with pytest.raises(Exception):
- units = file_io.parse_units(invalid_json_file)
-
- # Test that an exception is raised if a unit is defined twice in
- # the additional units file
- duplicate_units_file = (
- 'pelicun/tests/data/file_io/test_parse_units/duplicate2.json'
- )
- with pytest.raises(ValueError):
- units = file_io.parse_units(duplicate_units_file)
-
- # Test that an exception is raised if a unit conversion factor is not a float
- invalid_units_file = 'pelicun/tests/data/file_io/test_parse_units/not_float.json'
- with pytest.raises(TypeError):
- units = file_io.parse_units(invalid_units_file)
-
- # Test that we get an error if some first-level key does not point
- # to a dictionary
- invalid_units_file = 'pelicun/tests/data/file_io/test_parse_units/not_dict.json'
- with pytest.raises(ValueError):
- units = file_io.parse_units(invalid_units_file)
-
-
def test_save_to_csv():
# Test saving with orientation 0
data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]})
diff --git a/pelicun/tools/DL_calculation.py b/pelicun/tools/DL_calculation.py
index da08862dc..5449a84a9 100644
--- a/pelicun/tools/DL_calculation.py
+++ b/pelicun/tools/DL_calculation.py
@@ -105,6 +105,7 @@ def log_msg(msg):
'Hazus Earthquake - Buildings': 'damage_DB_Hazus_EQ_bldg.csv',
'Hazus Earthquake - Stories': 'damage_DB_Hazus_EQ_story.csv',
'Hazus Earthquake - Transportation': 'damage_DB_Hazus_EQ_trnsp.csv',
+ 'Hazus Earthquake - Water': 'damage_DB_Hazus_EQ_water.csv',
'Hazus Hurricane': 'damage_DB_SimCenter_Hazus_HU_bldg.csv',
},
'repair': {
@@ -164,7 +165,7 @@ def log_msg(msg):
'Sample': False,
'Statistics': False,
'GroupedSample': True,
- 'GroupedStatistics': False,
+ 'GroupedStatistics': True,
},
'Loss': {
'BldgRepair': {
@@ -306,6 +307,7 @@ def run_pelicun(
detailed_results,
regional,
output_format,
+ custom_model_dir,
**kwargs,
):
"""
@@ -327,6 +329,9 @@ def run_pelicun(
Path pointing to the location of a Python script with an auto_populate
method that automatically creates the performance model using data
provided in the AIM JSON file.
+ custom_model_dir: string, optional
+ Path pointing to a directory with files that define user-provided model
+ parameters for a customized damage and loss assessment.
detailed_results: bool, optional
If False, only the main statistics are saved.
@@ -361,6 +366,8 @@ def run_pelicun(
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
+ custom_dl_file_path = custom_model_dir #f"{config['commonFileDir']}/CustomDLModels/"
+
DL_config = config.get('DL', None)
if not DL_config:
log_msg("Damage and Loss configuration missing from config file. ")
@@ -370,6 +377,16 @@ def run_pelicun(
config_ap, CMP = auto_populate(config, auto_script_path)
+ if config_ap['DL'] is None:
+
+ log_msg(
+ "The prescribed auto-population script failed to identify "
+ "a valid damage and loss configuration for this asset. "
+ "Terminating analysis."
+ )
+
+ return 0
+
# add the demand information
config_ap['DL']['Demands'].update(
{'DemandFilePath': f'{demand_file}', 'SampleSize': f'{realizations}'}
@@ -552,7 +569,7 @@ def run_pelicun(
{
"SampleSize": sample_size,
'PreserveRawOrder': demand_config.get('CoupledDemands', False),
- # 'DemandCloning': demand_config.get('DemandCloning', False)
+ 'DemandCloning': demand_config.get('DemandCloning', False)
}
)
@@ -840,6 +857,8 @@ def run_pelicun(
if asset_config.get('ComponentDatabasePath', False) is not False:
extra_comps = asset_config['ComponentDatabasePath']
+ extra_comps = extra_comps.replace('CustomDLDataFolder', custom_dl_file_path)
+
component_db += [
extra_comps,
]
@@ -969,6 +988,18 @@ def run_pelicun(
adf.loc['irreparable', ('LS1', 'Theta_0')] = 1e10
adf.loc['irreparable', 'Incomplete'] = 0
+ # TODO: we can improve this by creating a water network-specific assessment class
+ if "Water" in asset_config['ComponentDatabase']:
+
+ # add a placeholder aggregate fragility that will never trigger
+ # damage, but allow damage processes to aggregate the various pipeline damages
+ adf.loc['aggregate', ('Demand', 'Directional')] = 1
+ adf.loc['aggregate', ('Demand', 'Offset')] = 0
+ adf.loc['aggregate', ('Demand', 'Type')] = 'Peak Ground Velocity'
+ adf.loc['aggregate', ('Demand', 'Unit')] = 'mps'
+ adf.loc['aggregate', ('LS1', 'Theta_0')] = 1e10
+ adf.loc['aggregate', 'Incomplete'] = 0
+
PAL.damage.load_damage_model(
component_db
+ [
@@ -1271,12 +1302,14 @@ def run_pelicun(
if bldg_repair_config.get('ConsequenceDatabasePath', False) is not False:
extra_comps = bldg_repair_config['ConsequenceDatabasePath']
+ extra_comps = extra_comps.replace('CustomDLDataFolder', custom_dl_file_path)
+
consequence_db += [
extra_comps,
]
extra_conseq_df = load_data(
- bldg_repair_config['ConsequenceDatabasePath'],
+ extra_comps,
unit_conversion_factors=None,
orientation=1,
reindex=False,
@@ -1498,9 +1531,18 @@ def run_pelicun(
)
elif bldg_repair_config['MapApproach'] == "User Defined":
- loss_map = pd.read_csv(
- bldg_repair_config['MapFilePath'], index_col=0
- )
+
+ if bldg_repair_config.get('MapFilePath', False) is not False:
+ loss_map_path = bldg_repair_config['MapFilePath']
+
+ loss_map_path = loss_map_path.replace(
+ 'CustomDLDataFolder', custom_dl_file_path)
+
+ else:
+ print("User defined loss map path missing. Terminating analysis")
+ return -1
+
+ loss_map = pd.read_csv(loss_map_path, index_col=0)
# prepare additional loss map entries, if needed
if 'DMG-collapse' not in loss_map.index:
@@ -1675,9 +1717,6 @@ def run_pelicun(
if 'damage_sample' not in locals():
damage_sample = PAL.damage.save_sample()
- if 'agg_repair' not in locals():
- agg_repair = PAL.bldg_repair.aggregate_losses()
-
damage_sample = damage_sample.groupby(level=[0, 3], axis=1).sum()
damage_sample_s = convert_to_SimpleIndex(damage_sample, axis=1)
@@ -1691,7 +1730,16 @@ def run_pelicun(
else:
damage_sample_s['irreparable'] = np.zeros(damage_sample_s.shape[0])
- agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
+ if loss_config is not None:
+
+ if 'agg_repair' not in locals():
+ agg_repair = PAL.bldg_repair.aggregate_losses()
+
+ agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
+
+ else:
+
+ agg_repair_s = pd.DataFrame()
summary = pd.concat(
[agg_repair_s, damage_sample_s[['collapse', 'irreparable']]], axis=1
@@ -1779,7 +1827,7 @@ def main():
)
parser.add_argument('--auto_script', default=None)
parser.add_argument('--resource_dir', default=None)
- parser.add_argument('--custom_fragility_dir', default=None)
+ parser.add_argument('--custom_model_dir', default=None)
parser.add_argument(
'--regional', default=False, type=str2bool, nargs='?', const=False
)
@@ -1818,7 +1866,7 @@ def main():
ground_failure=args.ground_failure,
auto_script_path=args.auto_script,
resource_dir=args.resource_dir,
- custom_fragility_dir=args.custom_fragility_dir,
+ custom_model_dir=args.custom_model_dir,
regional=args.regional,
output_format=args.output_format,
)