From 8f5b3376b4ef241b507ba32c102cb11dc931a051 Mon Sep 17 00:00:00 2001
From: Petr Matyas
Date: Wed, 22 May 2024 11:48:37 +0200
Subject: [PATCH] WIP: Rewrite under test with again and failed-only

---
 tmt/base.py                    | 37 +---------------
 tmt/cli.py                     |  7 +--
 tmt/steps/__init__.py          |  2 +-
 tmt/steps/discover/__init__.py | 78 ++++------------------------------
 tmt/steps/discover/fmf.py      |  4 ++
 tmt/steps/discover/shell.py    | 12 +++---
 tmt/steps/execute/__init__.py  | 26 ++----------
 tmt/utils.py                   |  6 ---
 8 files changed, 31 insertions(+), 141 deletions(-)

diff --git a/tmt/base.py b/tmt/base.py
index 8f0b0a37ed..138c72f401 100644
--- a/tmt/base.py
+++ b/tmt/base.py
@@ -1669,21 +1669,6 @@ def __init__(
 
         self._plan_environment = Environment()
 
-        # Set directory for last run execute data in case of a rerun
-        if self.is_rerun:
-            assert self.workdir is not None  # narrow type
-            self.last_run_execute: Path = self.workdir / 'last_run_execute'
-
-        # Store 'environment' and 'environment-file' keys content
-        self._environment = tmt.utils.environment_from_spec(
-            raw_fmf_environment_files=node.get("environment-file") or [],
-            raw_fmf_environment=node.get('environment', {}),
-            raw_cli_environment_files=self.opt('environment-file') or [],
-            raw_cli_environment=self.opt('environment'),
-            file_root=Path(node.root) if node.root else None,
-            key_address=node.name,
-            logger=self._logger)
-
         # Expand all environment and context variables in the node
         with self.environment.as_environ():
             expand_node_data(node.data, self._fmf_context)
@@ -2248,23 +2233,6 @@ def _lint_step(step: str) -> LinterReturn:
 
     def wake(self) -> None:
         """ Wake up all steps """
-        # Additional debug info like plan environment
-        self.debug('info', color='cyan', shift=0, level=3)
-        # TODO: something better than str()?
-        self.debug('environment', format_value(self.environment), 'magenta', level=3)
-        self.debug('context', format_value(self._fmf_context), 'magenta', level=3)
-
-        # Save last run execute step if called with rerun
-        if self.is_rerun:
-            assert self.workdir is not None  # narrow type
-            if not (self.workdir / 'execute').exists():
-                raise tmt.utils.GeneralError(
-                    "Run id has to be specified and "
-                    "execute directory has to exist in order to use --rerun.")
-            self.debug(f"Saving last run execute into {self.last_run_execute}.")
-            shutil.copytree(self.workdir / 'execute', self.last_run_execute, dirs_exist_ok=True)
-
-        # Wake up all steps
         self.debug('wake', color='cyan', shift=0, level=2)
         for step in self.steps(enabled_only=False):
             self.debug(str(step), color='blue', level=2)
@@ -2335,10 +2303,9 @@ def go(self) -> None:
         try:
             for step in self.steps(skip=['finish']):
                 step.go()
-                # Finish plan if no tests found (except dry mode and rerun)
+                # Finish plan if no tests found (except dry mode)
                 if (isinstance(step, tmt.steps.discover.Discover) and not step.tests()
-                        and not self.is_dry_run and not step.extract_tests_later
-                        and not self.is_rerun):
+                        and not self.is_dry_run and not step.extract_tests_later):
                     step.info(
                         'warning', 'No tests found, finishing plan.',
                         color='yellow', shift=1)
diff --git a/tmt/cli.py b/tmt/cli.py
index 060e6169a4..6294e8d1f0 100644
--- a/tmt/cli.py
+++ b/tmt/cli.py
@@ -383,9 +383,6 @@ def main(
 @option(
     '--scratch', is_flag=True,
     help='Remove the run workdir before executing to start from scratch.')
-@option(
-    '--rerun', is_flag=True,
-    help='Rerun failed tests and update existing results.')
 @option(
     '--follow', is_flag=True,
     help='Output the logfile as it grows.')
@@ -485,6 +482,10 @@ def run_plans(context: Context, **kwargs: Any) -> None:
help=""" Filter by linked objects (regular expressions are supported for both relation and target). """) +@option( + '--failed-only', is_flag=True, default=False, + help="Filter failed tests from a previous run to run again" +) @verbosity_options def run_tests(context: Context, **kwargs: Any) -> None: """ diff --git a/tmt/steps/__init__.py b/tmt/steps/__init__.py index f24055ed5e..0080787d03 100644 --- a/tmt/steps/__init__.py +++ b/tmt/steps/__init__.py @@ -664,7 +664,7 @@ def wake(self) -> None: """ Wake up the step (process workdir and command line) """ # Cleanup possible old workdir if called with --force, but not # if running the step --again which should reuse saved step data - if (self.is_forced_run or self.is_rerun) and not self.should_run_again: + if self.is_forced_run and not self.should_run_again: self._workdir_cleanup() # Load stored data diff --git a/tmt/steps/discover/__init__.py b/tmt/steps/discover/__init__.py index 27a617729f..bfe1502c4d 100644 --- a/tmt/steps/discover/__init__.py +++ b/tmt/steps/discover/__init__.py @@ -17,7 +17,6 @@ import tmt.utils from tmt.options import option from tmt.plugins import PluginRegistry -from tmt.result import Result from tmt.steps import Action from tmt.utils import GeneralError, Path, field, key_to_option @@ -161,36 +160,19 @@ def post_dist_git(self, created_content: list[Path]) -> None: """ pass - def filter_for_rerun(self) -> None: - """ Filter out passed tests from previous run data """ - assert isinstance(self.step.parent, tmt.base.Plan) # narrow type - old_results: Path = self.step.parent.last_run_execute / 'results.yaml' - results = [ - Result.from_serialized(data) for data in - tmt.utils.yaml_to_list(self.read(old_results))] - results_failed: list[str] = [] - results_passed: list[Result] = [] + def get_failed_tests_from_execute(self) -> list[str]: + """ + Get list of all failed test case names from execute + Relevant for running again + """ + results = self.parent.parent.execute._results + failed_results = [] for result in results: if ( result.result is not tmt.result.ResultOutcome.PASS and result.result is not tmt.result.ResultOutcome.INFO): - results_failed.append(result.name) - else: - results_passed.append(result) - - # Overwrite previous run results to only include passed cases - self.debug( - f"Overwriting {old_results} to only include passed results: " - f"{', '.join([result.name for result in results_passed])}") - self.write( - old_results, - tmt.utils.dict_to_yaml([result.to_serialized() for result in results_passed])) - - tests_to_execute: list[tmt.base.Test] = [] - for test in self._tests: - if test.name in results_failed: - tests_to_execute.append(test) - self._tests: list[tmt.base.Test] = tests_to_execute + failed_results.append(result.name) + return failed_results class Discover(tmt.steps.Step): @@ -272,44 +254,6 @@ def save(self) -> None: self.write(Path('tests.yaml'), tmt.utils.dict_to_yaml(raw_test_data)) - def _filter_for_rerun(self) -> None: - """ Filter out passed tests from previous run data """ - assert isinstance(self.parent, tmt.base.Plan) # narrow type - old_results: Path = self.parent.last_run_execute / 'results.yaml' - results = [ - Result.from_serialized(data) for data in - tmt.utils.yaml_to_list(self.read(old_results))] - results_failed: list[Result] = [] - results_passed: list[Result] = [] - for result in results: - if ( - result.result is not tmt.result.ResultOutcome.PASS and - result.result is not tmt.result.ResultOutcome.INFO): - results_failed.append(result) - else: - results_passed.append(result) 
-
-        # Save positive results to specific results.yaml
-        old_results_positive: Path = (
-            self.parent.last_run_execute / 'positive_results.yaml')
-        self.debug(
-            f"Save positive results from last run to {old_results_positive}, these are: "
-            f"{', '.join([result.name for result in results_passed])}")
-        self.write(
-            old_results_positive,
-            tmt.utils.dict_to_yaml([result.to_serialized() for result in results_passed]))
-
-        # Filter out failed tests based on test name and serial number
-        filtered_tests: dict[str, list[tmt.base.Test]] = {}
-        for phase in self._tests:
-            current_phase_filtered: list[tmt.base.Test] = []
-            for test in self._tests[phase]:
-                for result in results_failed:
-                    if test.name == result.name and test.serial_number == result.serial_number:
-                        current_phase_filtered.append(test)
-            filtered_tests[phase] = current_phase_filtered
-        self._tests = filtered_tests
-
     def _discover_from_execute(self) -> None:
         """ Check the execute step for possible shell script tests """
 
@@ -432,10 +376,6 @@ def go(self, force: bool = False) -> None:
         for test in self.tests():
             test.serial_number = self.plan.draw_test_serial_number(test)
 
-        # Filter selected tests if this is a rerun
-        if self.is_rerun:
-            self._filter_for_rerun()
-
         # Show fmf identifiers for tests discovered in plan
         # TODO: This part should go into the 'fmf.py' module
         if self.opt('fmf_id'):
diff --git a/tmt/steps/discover/fmf.py b/tmt/steps/discover/fmf.py
index a09c47561d..59884cfbf4 100644
--- a/tmt/steps/discover/fmf.py
+++ b/tmt/steps/discover/fmf.py
@@ -555,6 +555,10 @@ def do_the_discovery(self, path: Optional[Path] = None) -> None:
             # Nothing was modified, do not select anything
             return
 
+        # Append failed tests from previous run to names filter if failed only argument is supplied
+        if self.get('failed-only'):
+            names.extend(self.get_failed_tests_from_execute())
+
         # Initialize the metadata tree, search for available tests
         self.debug(f"Check metadata tree in '{tree_path}'.")
         if self.is_dry_run:
diff --git a/tmt/steps/discover/shell.py b/tmt/steps/discover/shell.py
index fbb83faa3e..4ec4ce0e9d 100644
--- a/tmt/steps/discover/shell.py
+++ b/tmt/steps/discover/shell.py
@@ -439,15 +439,17 @@ def go(self) -> None:
                 raise tmt.utils.DiscoverError(
                     "Failed to process 'dist-git-source'.") from error
 
+        # Append failed tests from previous run to names filter if failed only argument is supplied
+        names = []
+        if self.get('failed-only'):
+            names = self.get_failed_tests_from_execute()
+
         # Use a tmt.Tree to apply possible command line filters
         self._tests = tmt.Tree(
             logger=self._logger,
             tree=tests).tests(
-            conditions=["manual is False"])
-
-        # Filter selected tests if this is a rerun
-        if self.is_rerun:
-            self.filter_for_rerun()
+            conditions=["manual is False"],
+            names=names)
 
         # Propagate `where` key and TMT_SOURCE_DIR
         for test in self._tests:
diff --git a/tmt/steps/execute/__init__.py b/tmt/steps/execute/__init__.py
index d4485bf5c0..a4f6b0eb5f 100644
--- a/tmt/steps/execute/__init__.py
+++ b/tmt/steps/execute/__init__.py
@@ -6,7 +6,6 @@
 import signal as _signal
 import subprocess
 import threading
-import shutil
 from contextlib import suppress
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union, cast
@@ -815,29 +814,9 @@ def load(self) -> None:
         except tmt.utils.FileError:
             self.debug('Test results not found.', level=2)
 
-    def merge_results_rerun(self) -> None:
-        """ Merge new results with old ones for rerun """
-        assert isinstance(self.parent, tmt.base.Plan)  # narrow type
-        old_results: list[Result] = [
-            Result.from_serialized(data) for data in
-            tmt.utils.yaml_to_list(
-                self.read(self.parent.last_run_execute / 'positive_results.yaml'))]
-
-        for result in old_results:
-            # Add old results into new ones and copy log directories
-            self._results.append(result)
-            assert self.workdir is not None  # narrow type
-            assert result.data_path is not None  # narrow type
-            shutil.copytree(
-                self.parent.last_run_execute / result.data_path.parent,
-                self.workdir / result.data_path.parent,
-                dirs_exist_ok=True)
-
     def save(self) -> None:
         """ Save test results to the workdir """
         super().save()
-        if self.is_rerun and self.status() == 'done':
-            self.merge_results_rerun()
         results = [result.to_serialized() for result in self.results()]
         self.write(Path('results.yaml'), tmt.utils.dict_to_yaml(results))
 
@@ -886,9 +865,12 @@ def go(self, force: bool = False) -> None:
         super().go(force=force)
 
         # Clean up possible old results
-        if force or self.should_run_again:
+        if force:
             self._results.clear()
 
+        if self.should_run_again:
+            self.status('todo')
+
         # Nothing more to do if already done
         if self.status() == 'done':
             self.info('status', 'done', 'green', shift=1)
diff --git a/tmt/utils.py b/tmt/utils.py
index 9858eef0d3..6f4a4ed4e8 100644
--- a/tmt/utils.py
+++ b/tmt/utils.py
@@ -1856,12 +1856,6 @@ def is_feeling_safe(self) -> bool:
 
         return self._get_cli_flag('is_feeling_safe', 'feeling_safe', False)
 
-    @property
-    def is_rerun(self) -> bool:
-        """ Whether the current run is a rerun and so allowed to overwrite files and data """
-
-        return self._get_cli_flag('is_rerun', 'rerun', False)
-
     def _level(self) -> int:
         """ Hierarchy level """
         if self.parent is None:
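
Note on the selection rule: the new get_failed_tests_from_execute() helper treats every
result whose outcome is neither PASS nor INFO as failed and, when --failed-only is set,
feeds those test names back into the discover name filter. Below is a minimal,
self-contained sketch of that rule for illustration only; Outcome, FakeResult and
failed_test_names are hypothetical stand-ins, not tmt's real Result and ResultOutcome
classes.

    from dataclasses import dataclass
    from enum import Enum


    class Outcome(Enum):
        # Stand-in for tmt.result.ResultOutcome (only the values needed here)
        PASS = 'pass'
        INFO = 'info'
        FAIL = 'fail'
        ERROR = 'error'


    @dataclass
    class FakeResult:
        # Stand-in for tmt.result.Result: just a test name and its outcome
        name: str
        result: Outcome


    def failed_test_names(results: list[FakeResult]) -> list[str]:
        # Same rule as the patch: anything that is neither PASS nor INFO
        # counts as failed and should be scheduled again
        return [
            r.name for r in results
            if r.result not in (Outcome.PASS, Outcome.INFO)]


    results = [
        FakeResult('/tests/one', Outcome.PASS),
        FakeResult('/tests/two', Outcome.FAIL),
        FakeResult('/tests/three', Outcome.ERROR)]
    print(failed_test_names(results))  # ['/tests/two', '/tests/three']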