Commit c97aaa8

Merge pull request #94 from NHERI-SimCenter/develop

Merging develop into master as we are preparing for the v3.5 release

zsarnoczay authored Jan 28, 2025
2 parents 7863a1a + 46c0e0f commit c97aaa8
Showing 53 changed files with 762 additions and 104,855 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/build_docs.yaml
@@ -12,8 +12,9 @@ jobs:
        os: [ubuntu-latest]
        python-version: ['3.10']
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
15 changes: 6 additions & 9 deletions .github/workflows/deploy_to_pypi.yaml
@@ -1,32 +1,29 @@
name: Deploy to PyPI

on:
  release:
    types: [created]

jobs:
  deploy:
    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
    steps:
      - name: Check out code
-        uses: actions/checkout@v3
-
+        uses: actions/checkout@v4
+        with:
+          submodules: true
      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: '3.10'
-
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install setuptools wheel twine
      - name: Build package
        run: |
          python setup.py sdist bdist_wheel
      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          user: zsarnoczay
-          password: ${{ secrets.PELICUN_GITHUB_TOKEN }}
+        # use trusted publisher, no password needed
10 changes: 3 additions & 7 deletions .github/workflows/docs_check.yaml
@@ -1,32 +1,28 @@
name: Check for Sphinx Warnings
-
on:
  pull_request:
    paths:
      - "doc/**"
      - "**/*.rst"
      - ".github/workflows/docs_check.yaml"
      - "setup.py"
-
jobs:
  build:
    runs-on: ubuntu-latest
-
    steps:
      - name: Check out the repository
-        uses: actions/checkout@v3
-
+        uses: actions/checkout@v4
+        with:
+          submodules: true
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'
-
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y pandoc
          python -m pip install -e .[development]
      - name: Check for Sphinx warnings
        run: |
          sphinx-build -M html ./doc/source ./doc/_build --fail-on-warning
2 changes: 2 additions & 0 deletions .github/workflows/format_check.yml
@@ -5,6 +5,8 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
+        with:
+          submodules: true
      - uses: chartboost/ruff-action@v1
        with:
          args: 'format --check'
2 changes: 2 additions & 0 deletions .github/workflows/lint.yml
@@ -5,6 +5,8 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
+        with:
+          submodules: true
      - uses: chartboost/ruff-action@v1
        with:
          version: 0.7.0
9 changes: 3 additions & 6 deletions .github/workflows/spell_check.yml
@@ -1,15 +1,12 @@
name: Spell Check
-
on: [push, pull_request]
-
jobs:
  spell-check:
    runs-on: ubuntu-latest
-
    steps:
      - name: Checkout code
-        uses: actions/checkout@v2
-
+        uses: actions/checkout@v4
+        with:
+          submodules: true
      - name: Run codespell
        uses: codespell-project/actions-codespell@v2
-
6 changes: 3 additions & 3 deletions .github/workflows/tests.yml
@@ -1,9 +1,7 @@
name: Tests
-
on:
  - push
  - pull_request
-
jobs:
  test:
    runs-on: ${{ matrix.os }}
@@ -12,7 +10,9 @@ jobs:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.9', '3.10', '3.11','3.12']
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
100 changes: 82 additions & 18 deletions pelicun/assessment.py
@@ -63,14 +63,18 @@
        'Hazus Earthquake - Stories': 'damage_DB_Hazus_EQ_story.csv',
        'Hazus Earthquake - Transportation': 'damage_DB_Hazus_EQ_trnsp.csv',
        'Hazus Earthquake - Water': 'damage_DB_Hazus_EQ_water.csv',
+        'Hazus Earthquake - Power': 'damage_DB_Hazus_EQ_power.csv',
        'Hazus Hurricane': 'damage_DB_SimCenter_Hazus_HU_bldg.csv',
    },
    'repair': {
        'FEMA P-58': 'loss_repair_DB_FEMA_P58_2nd.csv',
        'Hazus Earthquake - Buildings': 'loss_repair_DB_Hazus_EQ_bldg.csv',
        'Hazus Earthquake - Stories': 'loss_repair_DB_Hazus_EQ_story.csv',
        'Hazus Earthquake - Transportation': 'loss_repair_DB_Hazus_EQ_trnsp.csv',
-        'Hazus Hurricane': 'loss_repair_DB_SimCenter_Hazus_HU_bldg.csv',
+        'Hazus Hurricane': (
+            'loss_repair_DB_SimCenter_Hazus_HU_bldg.csv,'
+            'loss_repair_DB_Hazus_FL_bldg.csv'
+        ),
    },
}
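Note: the trailing comma inside the first string literal is intentional — Python's implicit string concatenation produces a single comma-separated value, which the loaders below split into multiple default databases. A minimal sketch of the expected expansion, mirroring the `split(',')` logic added to `calculate_damage` and `load_consequence_info` further down:

```python
# Sketch: expanding a comma-separated default-DB entry into the
# 'PelicunDefault/' paths that the loaders below consume.
default_dbs = {
    'repair': {
        'Hazus Hurricane': (
            'loss_repair_DB_SimCenter_Hazus_HU_bldg.csv,'  # implicit concat
            'loss_repair_DB_Hazus_FL_bldg.csv'
        ),
    },
}

consequence_db = [
    'PelicunDefault/' + filename
    for filename in default_dbs['repair']['Hazus Hurricane'].split(',')
]
print(consequence_db)
# ['PelicunDefault/loss_repair_DB_SimCenter_Hazus_HU_bldg.csv',
#  'PelicunDefault/loss_repair_DB_Hazus_FL_bldg.csv']
```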

@@ -218,7 +222,10 @@ def get_default_data(self, data_name: str) -> pd.DataFrame:
                'Please use `loss_repair_DB` instead.'
            )

-        data_path = f'{base.pelicun_path}/resources/SimCenterDBDL/{data_name}.csv'
+        data_path = file_io.substitute_default_path(
+            [f'PelicunDefault/{data_name}.csv']
+        )[0]
+        assert isinstance(data_path, str)

        data = file_io.load_data(
            data_path, None, orientation=1, reindex=False, log=self.log
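The removed lines show that default files used to resolve directly to `{base.pelicun_path}/resources/SimCenterDBDL/`. A minimal sketch of what a `PelicunDefault/` substitution presumably does — the actual `file_io.substitute_default_path` may resolve names differently, and the `PELICUN_RESOURCE_DIR` constant here is a hypothetical stand-in inferred from the removed f-string:

```python
from pathlib import Path

# Hypothetical resource location, inferred from the removed f-string path.
PELICUN_RESOURCE_DIR = Path('pelicun') / 'resources' / 'SimCenterDBDL'


def substitute_default_path(paths: list[str]) -> list[str]:
    """Replace the 'PelicunDefault/' prefix with the bundled resource dir."""
    return [
        str(PELICUN_RESOURCE_DIR / p.removeprefix('PelicunDefault/'))
        if p.startswith('PelicunDefault/')
        else p
        for p in paths
    ]


print(substitute_default_path(['PelicunDefault/damage_DB_Hazus_EQ_bldg.csv']))
```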
@@ -249,7 +256,10 @@ def get_default_metadata(self, data_name: str) -> dict:
            '`fragility_DB` is deprecated and will be dropped in '
            'future versions of pelicun. Please use `damage_DB` instead.'
        )
-        data_path = f'{base.pelicun_path}/resources/SimCenterDBDL/{data_name}.json'
+        data_path = file_io.substitute_default_path(
+            [f'PelicunDefault/{data_name}.json']
+        )[0]
+        assert isinstance(data_path, str)

        with Path(data_path).open(encoding='utf-8') as f:
            data = json.load(f)
@@ -963,7 +973,10 @@ def calculate_damage(  # noqa: C901
        # load the fragility information
        if component_database in default_dbs['fragility']:
            component_db = [
-                'PelicunDefault/' + default_dbs['fragility'][component_database],
+                'PelicunDefault/' + filename
+                for filename in default_dbs['fragility'][component_database].split(
+                    ','
+                )
            ]
        else:
            component_db = []
@@ -1253,6 +1266,10 @@ def calculate_loss(
        replacement_energy_parameters: dict[str, float | str] | None = None,
        loss_map_path: str | None = None,
        decision_variables: tuple[str, ...] | None = None,
+        replacement_configuration: (
+            tuple[uq.RandomVariableRegistry, dict[str, float]] | None
+        ) = None,
+        loss_combination_method: str | None = None,
    ) -> tuple[pd.DataFrame, pd.DataFrame]:
        """
        Calculate losses.
@@ -1284,6 +1301,11 @@
            Optional path to a loss map file.
        decision_variables: tuple[str] or None
            Optional decision variables for the assessment.
+        replacement_configuration: tuple or None
+            Loss thresholds of replacement consequences.
+        loss_combination_method: str, optional
+            String defining the method to use for combining losses for
+            components that represent different demands.

        Returns
        -------
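A hypothetical call showing only the new keyword; `asmnt` stands in for an assessment object whose damage results are already computed, and all other arguments keep their earlier behavior:

```python
# Hypothetical usage: combine wind and flood losses with the Hazus
# Hurricane rule; replacement_configuration keeps its default (None).
df_agg, exceedance_bool_df = asmnt.calculate_loss(
    loss_combination_method='Hazus Hurricane',
)
```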
@@ -1389,7 +1411,40 @@

        self.loss.calculate()

-        df_agg, exceedance_bool_df = self.loss.aggregate_losses(future=True)
+        if loss_combination_method is None:
+            loss_combination = None
+
+        elif loss_combination_method == 'Hazus Hurricane':
+            # assemble the combination dict for wind and storm surge
+            # open the base combination matrix
+            file_path = file_io.substitute_default_path(
+                ['PelicunDefault/Wind_Flood_Hazus_HU_bldg.csv']
+            )[0]
+            assert isinstance(file_path, str)
+            combination_array = pd.read_csv(
+                file_path,
+                index_col=None,
+                header=None,
+            ).to_numpy()
+
+            # get the component names
+            # assume that the first and second component in the loss map
+            # are the wind and flood components, respectively
+            wind_comp, flood_comp = loss_map.index.to_numpy()[[0, 1]]
+
+            loss_combination = {
+                'Cost': {
+                    (wind_comp, flood_comp): combination_array,
+                },
+            }
+
+        else:
+            msg = f'Invalid loss combination method: `{loss_combination_method}`.'
+            raise ValueError(msg)
+
+        df_agg, exceedance_bool_df = self.loss.aggregate_losses(
+            replacement_configuration, loss_combination, future=True
+        )
+        assert isinstance(df_agg, pd.DataFrame)
+        assert isinstance(exceedance_bool_df, pd.DataFrame)
        return df_agg, exceedance_bool_df
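The combination dict maps each decision variable to a pair of loss-map components plus a 2D combination array. Below is a toy stand-in for the bundled table, assuming it tabulates something like the classic Hazus union rule L = L_w + L_f − L_w·L_f over wind and flood loss ratios; whether `Wind_Flood_Hazus_HU_bldg.csv` encodes exactly this rule is an assumption:

```python
import numpy as np

# Toy combination table over loss ratios 0.0..1.0; the real CSV is read
# with header=None and passed to aggregate_losses as a raw 2D array.
axis = np.linspace(0.0, 1.0, 11)
combination_array = np.minimum(
    axis[:, None] + axis[None, :] - axis[:, None] * axis[None, :], 1.0
)

# Mirror the structure built in calculate_loss above; the component names
# are hypothetical and would come from the first two loss-map rows.
wind_comp, flood_comp = 'wind.building.1', 'flood.building.1'
loss_combination = {'Cost': {(wind_comp, flood_comp): combination_array}}
```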
@@ -1426,12 +1481,19 @@ def load_consequence_info(
"""
if consequence_database in default_dbs['repair']:
default_consequence_dbs = default_dbs['repair'][
consequence_database
].split(',')

consequence_db = [
'PelicunDefault/' + default_dbs['repair'][consequence_database],
'PelicunDefault/' + filename for filename in default_consequence_dbs
]

conseq_df = self.get_default_data(
default_dbs['repair'][consequence_database][:-4]
conseq_df = pd.concat(
[
self.get_default_data(filename[:-4])
for filename in default_consequence_dbs
]
)
else:
consequence_db = []
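Each default consequence database now loads into its own DataFrame before being stacked into one lookup table; a sketch with stand-in tables (the component IDs and the flat `Theta_0` column are illustrative — real pelicun consequence tables have richer, multi-level columns):

```python
import pandas as pd

# Stand-ins for two get_default_data() results, indexed by component ID.
hu_df = pd.DataFrame({'Theta_0': [1.0]}, index=['W.SF.1'])     # hypothetical IDs
fl_df = pd.DataFrame({'Theta_0': [2.0]}, index=['FL.BLDG.1'])

# One concatenated lookup table covering both databases.
conseq_df = pd.concat([hu_df, fl_df])
print(conseq_df)
```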
@@ -1902,11 +1964,13 @@ def _loss__map_auto(
"""
# get the damage sample
dmg_sample = assessment.damage.save_sample()
asset_sample = assessment.asset.save_cmp_sample()
assert isinstance(dmg_sample, pd.DataFrame)
assert isinstance(asset_sample, pd.DataFrame)

# create a mapping for all components that are also in
# the prescribed consequence database
dmg_cmps = dmg_sample.columns.unique(level='cmp')
asset_cmps = asset_sample.columns.unique(level='cmp')
loss_cmps = conseq_df.index.unique(level=0)

drivers = []
@@ -1916,32 +1980,32 @@
        # with these methods, we assume fragility and consequence data
        # have the same IDs

-        for dmg_cmp in dmg_cmps:
-            if dmg_cmp == 'collapse':
+        for asset_cmp in asset_cmps:
+            if asset_cmp == 'collapse':
                continue

-            if dmg_cmp in loss_cmps:
-                drivers.append(dmg_cmp)
-                loss_models.append(dmg_cmp)
+            if asset_cmp in loss_cmps:
+                drivers.append(asset_cmp)
+                loss_models.append(asset_cmp)

    elif dl_method in {
        'Hazus Earthquake',
        'Hazus Earthquake Transportation',
    }:
        # with Hazus Earthquake we assume that consequence
        # archetypes are only differentiated by occupancy type
-        for dmg_cmp in dmg_cmps:
-            if dmg_cmp == 'collapse':
+        for asset_cmp in asset_cmps:
+            if asset_cmp == 'collapse':
                continue

-            cmp_class = dmg_cmp.split('.')[0]
+            cmp_class = asset_cmp.split('.')[0]
            if occupancy_type is not None:
                loss_cmp = f'{cmp_class}.{occupancy_type}'
            else:
                loss_cmp = cmp_class

            if loss_cmp in loss_cmps:
-                drivers.append(dmg_cmp)
+                drivers.append(asset_cmp)
                loss_models.append(loss_cmp)

    return pd.DataFrame(loss_models, columns=['Repair'], index=drivers)
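The automatic loss map is now keyed on asset-model components rather than damaged components. A sketch of the resulting DataFrame with illustrative (not Hazus-official) IDs:

```python
import pandas as pd

# Asset components drive the mapping; in the Hazus Earthquake branch each
# component class is paired with the occupancy type (e.g., 'RES1').
drivers = ['STR.W1', 'NSD.W1']          # hypothetical asset components
loss_models = ['STR.RES1', 'NSD.RES1']  # matching consequence archetypes
loss_map = pd.DataFrame(loss_models, columns=['Repair'], index=drivers)
print(loss_map)
```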