🚀 Add nightly build to CI · 🚚 Move PR template to .github root · 🚀 Fix syntax

* 🔨 Refactor model train tests into pre-merge and nightly.
* 🩹 Fix file name issue for pytest and add init.
* 🚚 Move core tests to nightly.
* 🔨 Fix import.
* Add nightly to tox and remove os.environ.
* Update thresholds.
* Fix isort issues.
* Fix isort issues.
* 🚚 Move tests/nightly/core to tests/nightly/utils.
* 🚚 Move callback tests to pre-merge. Change tox.ini to reflect the new coverage threshold. Ref issue #94.
* Rename setup in test helpers.
* 🩹 Fix tests.

Co-authored-by: Ashwin Vaidya <[email protected]>
1 parent 9d1c0b7 · commit 5f2dda2 · 40 changed files with 813 additions and 200 deletions
@@ -0,0 +1,30 @@
name: Nightly-regression Test

on:
  workflow_dispatch: # run on request (no need for PR)
  schedule:
    - cron: "0 0 * * *"

jobs:
  Tox:
    runs-on: [self-hosted, linux, x64]
    strategy:
      max-parallel: 1
    if: github.ref == 'refs/heads/development'
    steps:
      - name: Print GPU status
        run: nvidia-smi
      - name: CHECKOUT REPOSITORY
        uses: actions/checkout@v2
      - name: Install Tox
        run: pip install tox
      - name: Coverage
        run: |
          export ANOMALIB_DATASET_PATH=/media/data1/datasets/MVTec
          export CUDA_VISIBLE_DEVICES=2
          tox -e nightly
      - name: Upload coverage result
        uses: actions/upload-artifact@v2
        with:
          name: coverage
          path: .tox/coverage.xml
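The Coverage step drives the nightly suite through tox and points the tests at a local MVTec copy via the ANOMALIB_DATASET_PATH environment variable. As a rough sketch (not part of this commit; get_dataset_path is a name invented here), a test helper could resolve that variable with a fallback like this:

import os

# Hypothetical helper, not from this commit: shows how a nightly test
# could pick up the dataset location exported by the CI workflow,
# falling back to a local default when the variable is unset.
def get_dataset_path(default: str = "./datasets/MVTec") -> str:
    return os.environ.get("ANOMALIB_DATASET_PATH", default)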
@@ -0,0 +1,15 @@
"""Utils to help in HPO search."""

# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
@@ -0,0 +1,53 @@
"""Utils to update configuration files."""

# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.

from typing import List

from omegaconf import DictConfig


def flatten_sweep_params(params_dict: DictConfig) -> DictConfig:
    """Flatten the nested parameters section of the config object.

    Args:
        params_dict (DictConfig): The dictionary containing the hpo parameters in the original, nested, structure.

    Returns:
        Flattened version of the parameter dictionary.
    """

    def process_params(nested_params: DictConfig, keys: List[str], flattened_params: DictConfig):
        """Flatten nested dictionary.

        Recursive helper function that traverses the nested config object and stores the leaf nodes in a flattened
        dictionary.

        Args:
            nested_params (DictConfig): Config object containing the original parameters.
            keys (List[str]): List of keys leading to the current location in the config.
            flattened_params (DictConfig): Dictionary in which the flattened parameters are stored.
        """
        for name, cfg in nested_params.items():
            if isinstance(cfg, DictConfig):
                process_params(cfg, keys + [str(name)], flattened_params)
            else:
                key = ".".join(keys + [str(name)])
                flattened_params[key] = cfg

    flattened_params_dict = DictConfig({})
    process_params(params_dict, [], flattened_params_dict)

    return flattened_params_dict
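As a quick illustration of the helper above, flattening a small nested sweep config (the values here are invented for the example) turns each leaf into a dotted key:

from omegaconf import DictConfig

# Example input, values invented for illustration.
params = DictConfig(
    {
        "model": {"lr": 0.001, "backbone": "resnet18"},
        "dataset": {"category": "bottle"},
    }
)
flat = flatten_sweep_params(params)
print(flat)
# Expected (roughly): {'model.lr': 0.001, 'model.backbone': 'resnet18', 'dataset.category': 'bottle'}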
@@ -0,0 +1,145 @@
"""Common helpers for both nightly and pre-merge model tests."""

# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.

import os
from typing import Dict, Optional, Tuple, Union

import numpy as np
from omegaconf import DictConfig, ListConfig
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

from anomalib.config import get_configurable_parameters, update_nncf_config
from anomalib.core.callbacks import get_callbacks
from anomalib.core.callbacks.visualizer_callback import VisualizerCallback
from anomalib.core.model.anomaly_module import AnomalyModule
from anomalib.data import get_datamodule
from anomalib.models import get_model


def setup_model_train(
    model_name: str,
    dataset_path: str,
    project_path: str,
    nncf: bool,
    category: str,
    score_type: Optional[str] = None,
    weight_file: str = "weights/last.ckpt",
    fast_run: bool = False,
) -> Tuple[Union[DictConfig, ListConfig], LightningDataModule, AnomalyModule, Trainer]:
    """Train the model based on the parameters passed.

    Args:
        model_name (str): Name of the model to train.
        dataset_path (str): Location of the dataset.
        project_path (str): Path to temporary project folder.
        nncf (bool): Add the NNCF callback.
        category (str): Category to train on.
        score_type (Optional[str], optional): Only used for DFM. Defaults to None.
        weight_file (str, optional): Path to the weight file.
        fast_run (bool, optional): If set to True, the model trains for only one epoch. One epoch is enough to
            ensure that both anomalous and non-anomalous images are present in the validation step.

    Returns:
        Tuple[Union[DictConfig, ListConfig], LightningDataModule, AnomalyModule, Trainer]: config, datamodule,
            trained model, trainer
    """
    config = get_configurable_parameters(model_name=model_name)
    if score_type is not None:
        config.model.score_type = score_type
    config.project.seed = 42
    config.dataset.category = category
    config.dataset.path = dataset_path
    config.project.log_images_to = []

    # If the weight file is empty, remove the key from the config
    if "weight_file" in config.model.keys() and weight_file == "":
        config.model.pop("weight_file")
    else:
        config.model.weight_file = weight_file

    if nncf:
        config.optimization.nncf.apply = True
        config = update_nncf_config(config)
        config.init_weights = None

    # Reassign the project path, as the config is updated in `update_nncf_config`
    config.project.path = project_path

    datamodule = get_datamodule(config)
    model = get_model(config)

    callbacks = get_callbacks(config)

    # Force the model checkpoint callback to create a checkpoint after the first epoch
    if fast_run:
        for index, callback in enumerate(callbacks):
            if isinstance(callback, ModelCheckpoint):
                callbacks.pop(index)
                break
        model_checkpoint = ModelCheckpoint(
            dirpath=os.path.join(config.project.path, "weights"),
            filename="last",
            monitor=None,
            mode="max",
            save_last=True,
            auto_insert_metric_name=False,
        )
        callbacks.append(model_checkpoint)

    # Remove the visualizer callback, as saving results takes time
    for index, callback in enumerate(callbacks):
        if isinstance(callback, VisualizerCallback):
            callbacks.pop(index)
            break

    # Train the model.
    if fast_run:
        config.trainer.max_epochs = 1

    trainer = Trainer(callbacks=callbacks, **config.trainer)
    trainer.fit(model=model, datamodule=datamodule)
    return config, datamodule, model, trainer


def model_load_test(config: Union[DictConfig, ListConfig], datamodule: LightningDataModule, results: Dict):
    """Create a new model based on the weights specified in config.

    Args:
        config (Union[DictConfig, ListConfig]): Model config.
        datamodule (LightningDataModule): Datamodule used for testing.
        results (Dict): Results from the original model.
    """
    loaded_model = get_model(config)  # get new model

    callbacks = get_callbacks(config)

    # Remove the visualizer callback, as saving results takes time
    for index, callback in enumerate(callbacks):
        if isinstance(callback, VisualizerCallback):
            callbacks.pop(index)
            break

    # Create a new trainer object. The new model is assumed to have a LoadModel callback,
    # while the original model had a ModelCheckpoint callback that wrote the weights.
    trainer = Trainer(callbacks=callbacks, **config.trainer)
    new_results = trainer.test(model=loaded_model, datamodule=datamodule)[0]
    assert np.isclose(
        results["image_AUROC"], new_results["image_AUROC"]
    ), "Loaded model does not yield close performance results"
    if config.dataset.task == "segmentation":
        assert np.isclose(
            results["pixel_AUROC"], new_results["pixel_AUROC"]
        ), "Loaded model does not yield close performance results"