From e54b84a3808fd4c927f99e66027fb6c54102a466 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 24 May 2024 10:19:46 +0200 Subject: [PATCH 01/13] Support for custom verdicts --- src/etos_test_runner/lib/executor.py | 6 +- src/etos_test_runner/lib/testrunner.py | 83 +++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 10 deletions(-) diff --git a/src/etos_test_runner/lib/executor.py b/src/etos_test_runner/lib/executor.py index 36c3ee4..55ae4f9 100644 --- a/src/etos_test_runner/lib/executor.py +++ b/src/etos_test_runner/lib/executor.py @@ -99,6 +99,7 @@ def __init__(self, test, iut, etos): self.context = self.etos.config.get("context") self.plugins = self.etos.config.get("plugins") self.result = True + self.returncode = None def load_regex(self): """Attempt to load regex file from environment variables. @@ -404,11 +405,12 @@ def _execute(self, workspace): self.logger.info("Wait for test to finish.") # We must consume the iterator here, even if we do not parse the lines. - for _, line in iterator: + for proc, line in iterator: if self.test_regex: self.parse(line) self.result = line - self.logger.info("Finished with result %r.", self.result) + self.returncode = proc.returncode + self.logger.info("Finished with result %r, exit code: %d", self.result, self.returncode) def execute(self, workspace, retries=3): """Retry execution of test cases. diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 01f28ce..15cd6b8 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""ETR test runner module.""" +import json import time import os import logging @@ -25,6 +26,42 @@ from etos_test_runner.lib.log_area import LogArea +class CustomVerdictMatcher: + """Match testframework output against user-defined verdict rules.""" + + SUPPORTED_CONDITION_KEYWORDS = [ + "test_framework_exit_code", + ] + + def __init__(self, rules, test_framework_output): + """Constructor.""" + self.rules = rules + self.test_framework_output = test_framework_output + + for rule in self.rules: + for key in rule["condition"].keys(): + if key not in self.SUPPORTED_CONDITION_KEYWORDS: + raise ValueError(f"Unsupported condition keyword for test outcome rules: {kw}! " + f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}.") + + def _evaluate_rule(self, rule) -> bool: + """Evaluate conditions within the given rule.""" + for kw, expected_value in rule["condition"].items(): + # logical AND: return False as soon as a false statement is encountered: + if kw == "test_framework_exit_code" and "test_framework_exit_code" in self.test_framework_output.keys(): + if self.test_framework_output["test_framework_exit_code"] != expected_value: + return False + # implement more keywords if needed + return True + + def evaluate(self) -> dict: + """Evaluate the list of given rules and return the first match.""" + for rule in self.rules: + if self._evaluate_rule(rule): + return rule + return None + + class TestRunner: """Test runner for ETOS.""" @@ -91,7 +128,7 @@ def environment(self, context): host={"name": os.getenv("EXECUTION_SPACE_URL"), "user": "etos"}, ) - def run_tests(self, workspace): + def run_tests(self, workspace) -> tuple[bool, int]: """Execute test recipes within a test executor. :param workspace: Which workspace to execute test suite within. 
@@ -106,15 +143,27 @@ def run_tests(self, workspace): with Executor(test, self.iut, self.etos) as executor: self.logger.info("Starting test '%s'", executor.test_name) executor.execute(workspace) - if not executor.result: result = executor.result - self.logger.info("Test finished. Result: %s.", executor.result) - return result + self.logger.info("Test finished. Result: %s. Test framework exit code: %d", executor.result, executor.returncode) + return result, executor.returncode - def outcome(self, result, executed, description): + def outcome(self, result, executed, description, test_framework_exit_code): """Get outcome from test execution. + Example rule definition: + + rules = [ + { + "description": "Test collection error, no artifacts created", + "condition": { + "test_framework_exit_code": 4, + }, + "conclusion": "FAILED", + "verdict": "FAILED", + } + ] + :param result: Result of execution. :type result: bool :param executed: Whether or not tests have successfully executed. @@ -124,7 +173,24 @@ def outcome(self, result, executed, description): :return: Outcome of test execution. 
:rtype: dict """ - if executed: + verdict_rule_file = os.getenv("VERDICT_RULE_FILE") + custom_verdict = None + if verdict_rule_file is not None: + test_framework_output = { + "test_framework_exit_code": test_framework_exit_code, + } + with open(os.getenv("VERDICT_RULE_FILE"), "r") as inp: + rules = json.load(inp) + cvm = CustomVerdictMatcher(rules, test_framework_output) + custom_verdict = cvm.evaluate() + if None not in(verdict_rule_file, custom_verdict): + conclusion = custom_verdict["conclusion"] + verdict = custom_verdict["verdict"] + description = custom_verdict["description"] + self.logger.info( + "Verdict matches testrunner verdict rule: %s", custom_verdict + ) + elif executed: conclusion = "SUCCESSFUL" verdict = "PASSED" if result else "FAILED" self.logger.info( @@ -205,12 +271,13 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen result = True description = None executed = False + test_framework_exit_code = None try: with Workspace(self.log_area) as workspace: self.logger.info("Start IUT monitoring.") self.iut_monitoring.start_monitoring() self.logger.info("Starting test executor.") - result = self.run_tests(workspace) + result, test_framework_exit_code = self.run_tests(workspace) executed = True self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() @@ -224,7 +291,7 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() self.logger.info("Figure out test outcome.") - outcome = self.outcome(result, executed, description) + outcome = self.outcome(result, executed, description, test_framework_exit_code) pprint(outcome) self.logger.info("Send test suite finished event.") From e231e40f1118193a3afd83b9520b6b15336e52d0 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Tue, 28 May 2024 12:20:14 +0200 Subject: [PATCH 02/13] code review changes --- src/etos_test_runner/lib/executor.py | 13 ++++- 
src/etos_test_runner/lib/testrunner.py | 70 +++++++++++++++----------- 2 files changed, 52 insertions(+), 31 deletions(-) diff --git a/src/etos_test_runner/lib/executor.py b/src/etos_test_runner/lib/executor.py index 55ae4f9..2fa6f2e 100644 --- a/src/etos_test_runner/lib/executor.py +++ b/src/etos_test_runner/lib/executor.py @@ -405,12 +405,21 @@ def _execute(self, workspace): self.logger.info("Wait for test to finish.") # We must consume the iterator here, even if we do not parse the lines. + proc = None + line = "" for proc, line in iterator: if self.test_regex: self.parse(line) self.result = line - self.returncode = proc.returncode - self.logger.info("Finished with result %r, exit code: %d", self.result, self.returncode) + if proc is not None: + self.returncode = proc.returncode + self.logger.info( + "Finished with result %r, exit code: %d", + self.result, + self.returncode, + ) + else: + self.logger.info("Finished with result %r", self.result) def execute(self, workspace, retries=3): """Retry execution of test cases. diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 15cd6b8..2dc522e 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -19,6 +19,7 @@ import os import logging from pprint import pprint +from typing import Union from etos_test_runner.lib.iut_monitoring import IutMonitoring from etos_test_runner.lib.executor import Executor @@ -27,34 +28,54 @@ class CustomVerdictMatcher: - """Match testframework output against user-defined verdict rules.""" + # pylint: disable=too-few-public-methods + """Match testframework output against user-defined verdict rules. 
+ + Example rule definition: + + rules = [ + { + "description": "Test collection error, no artifacts created", + "condition": { + "test_framework_exit_code": 4, + }, + "conclusion": "FAILED", + "verdict": "FAILED", + } + ] + """ SUPPORTED_CONDITION_KEYWORDS = [ "test_framework_exit_code", ] - def __init__(self, rules, test_framework_output): - """Constructor.""" + def __init__(self, rules: list, test_framework_output: dict) -> None: + """Create new instance.""" self.rules = rules self.test_framework_output = test_framework_output for rule in self.rules: for key in rule["condition"].keys(): if key not in self.SUPPORTED_CONDITION_KEYWORDS: - raise ValueError(f"Unsupported condition keyword for test outcome rules: {kw}! " - f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}.") + raise ValueError( + f"Unsupported condition keyword for test outcome rules: {key}! " + f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." + ) - def _evaluate_rule(self, rule) -> bool: + def _evaluate_rule(self, rule: dict) -> bool: """Evaluate conditions within the given rule.""" for kw, expected_value in rule["condition"].items(): # logical AND: return False as soon as a false statement is encountered: - if kw == "test_framework_exit_code" and "test_framework_exit_code" in self.test_framework_output.keys(): + if ( + kw == "test_framework_exit_code" + and "test_framework_exit_code" in self.test_framework_output.keys() + ): if self.test_framework_output["test_framework_exit_code"] != expected_value: return False # implement more keywords if needed return True - def evaluate(self) -> dict: + def evaluate(self) -> Union[dict, None]: """Evaluate the list of given rules and return the first match.""" for rule in self.rules: if self._evaluate_rule(rule): @@ -128,7 +149,7 @@ def environment(self, context): host={"name": os.getenv("EXECUTION_SPACE_URL"), "user": "etos"}, ) - def run_tests(self, workspace) -> tuple[bool, int]: + def run_tests(self, workspace: Workspace) -> tuple[bool, 
int]: """Execute test recipes within a test executor. :param workspace: Which workspace to execute test suite within. @@ -145,25 +166,18 @@ def run_tests(self, workspace) -> tuple[bool, int]: executor.execute(workspace) if not executor.result: result = executor.result - self.logger.info("Test finished. Result: %s. Test framework exit code: %d", executor.result, executor.returncode) + self.logger.info( + "Test finished. Result: %s. Test framework exit code: %d", + executor.result, + executor.returncode, + ) return result, executor.returncode - def outcome(self, result, executed, description, test_framework_exit_code): + def outcome( + self, result: bool, executed: bool, description: str, test_framework_exit_code: int + ) -> dict: """Get outcome from test execution. - Example rule definition: - - rules = [ - { - "description": "Test collection error, no artifacts created", - "condition": { - "test_framework_exit_code": 4, - }, - "conclusion": "FAILED", - "verdict": "FAILED", - } - ] - :param result: Result of execution. :type result: bool :param executed: Whether or not tests have successfully executed. 
@@ -179,17 +193,15 @@ def outcome(self, result, executed, description, test_framework_exit_code): test_framework_output = { "test_framework_exit_code": test_framework_exit_code, } - with open(os.getenv("VERDICT_RULE_FILE"), "r") as inp: + with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: rules = json.load(inp) cvm = CustomVerdictMatcher(rules, test_framework_output) custom_verdict = cvm.evaluate() - if None not in(verdict_rule_file, custom_verdict): + if None not in (verdict_rule_file, custom_verdict): conclusion = custom_verdict["conclusion"] verdict = custom_verdict["verdict"] description = custom_verdict["description"] - self.logger.info( - "Verdict matches testrunner verdict rule: %s", custom_verdict - ) + self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) elif executed: conclusion = "SUCCESSFUL" verdict = "PASSED" if result else "FAILED" From 5c7f0bcc19859ff55aba2d8ae90321d00602b0e5 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Thu, 30 May 2024 12:27:29 +0200 Subject: [PATCH 03/13] VerdictMatcher draft --- src/etos_test_runner/lib/testrunner.py | 161 +++++-------------------- src/etos_test_runner/lib/verdict.py | 156 ++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 128 deletions(-) create mode 100644 src/etos_test_runner/lib/verdict.py diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 2dc522e..3929f81 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -25,62 +25,7 @@ from etos_test_runner.lib.executor import Executor from etos_test_runner.lib.workspace import Workspace from etos_test_runner.lib.log_area import LogArea - - -class CustomVerdictMatcher: - # pylint: disable=too-few-public-methods - """Match testframework output against user-defined verdict rules. 
- - Example rule definition: - - rules = [ - { - "description": "Test collection error, no artifacts created", - "condition": { - "test_framework_exit_code": 4, - }, - "conclusion": "FAILED", - "verdict": "FAILED", - } - ] - """ - - SUPPORTED_CONDITION_KEYWORDS = [ - "test_framework_exit_code", - ] - - def __init__(self, rules: list, test_framework_output: dict) -> None: - """Create new instance.""" - self.rules = rules - self.test_framework_output = test_framework_output - - for rule in self.rules: - for key in rule["condition"].keys(): - if key not in self.SUPPORTED_CONDITION_KEYWORDS: - raise ValueError( - f"Unsupported condition keyword for test outcome rules: {key}! " - f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." - ) - - def _evaluate_rule(self, rule: dict) -> bool: - """Evaluate conditions within the given rule.""" - for kw, expected_value in rule["condition"].items(): - # logical AND: return False as soon as a false statement is encountered: - if ( - kw == "test_framework_exit_code" - and "test_framework_exit_code" in self.test_framework_output.keys() - ): - if self.test_framework_output["test_framework_exit_code"] != expected_value: - return False - # implement more keywords if needed - return True - - def evaluate(self) -> Union[dict, None]: - """Evaluate the list of given rules and return the first match.""" - for rule in self.rules: - if self._evaluate_rule(rule): - return rule - return None +from etos_test_runner.lib.verdict import VerdictMatcher class TestRunner: @@ -105,6 +50,13 @@ def __init__(self, iut, etos): self.issuer = {"name": "ETOS Test Runner"} self.etos.config.set("iut", self.iut) self.plugins = self.etos.config.get("plugins") + rules = [] + if os.getenv("VERDICT_RULE_FILE"): + with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: + rules = json.load(inp) + self.verdict_matcher = VerdictMatcher(rules=rules) + else: + self.verdict_matcher = VerdictMatcher() def test_suite_started(self): """Publish a test 
suite started event. @@ -149,91 +101,49 @@ def environment(self, context): host={"name": os.getenv("EXECUTION_SPACE_URL"), "user": "etos"}, ) - def run_tests(self, workspace: Workspace) -> tuple[bool, int]: + def run_tests(self, workspace: Workspace) -> list[Union[int, None]]: """Execute test recipes within a test executor. :param workspace: Which workspace to execute test suite within. :type workspace: :obj:`etr.lib.workspace.Workspace` - :return: Result of test execution. - :rtype: bool + :return: List of test framework exit codes for each recipe. + :rtype: list of int or None instances """ recipes = self.config.get("recipes") - result = True + test_framework_exit_codes = [] for num, test in enumerate(recipes): self.logger.info("Executing test %s/%s", num + 1, len(recipes)) with Executor(test, self.iut, self.etos) as executor: self.logger.info("Starting test '%s'", executor.test_name) executor.execute(workspace) - if not executor.result: - result = executor.result - self.logger.info( - "Test finished. Result: %s. Test framework exit code: %d", - executor.result, - executor.returncode, - ) - return result, executor.returncode + if executor.returncode is not None: + self.logger.info( + "Test finished. Test framework exit code: %d", + executor.returncode, + ) + else: + self.logger.info("Test finished. Test framework exit code is None.") + test_framework_exit_codes.append(executor.returncode) + return executor.returncode def outcome( - self, result: bool, executed: bool, description: str, test_framework_exit_code: int + self, detailed_description: str, test_framework_exit_codes: list[Union[int, None]] ) -> dict: """Get outcome from test execution. - :param result: Result of execution. - :type result: bool - :param executed: Whether or not tests have successfully executed. - :type executed: bool - :param description: Optional description. - :type description: str + :param detailed_description: Optional detailed description. 
+ :type detailed_description: str + :test_framework_exit_codes: list of exit codes for each recipe + : :return: Outcome of test execution. :rtype: dict """ - verdict_rule_file = os.getenv("VERDICT_RULE_FILE") - custom_verdict = None - if verdict_rule_file is not None: - test_framework_output = { - "test_framework_exit_code": test_framework_exit_code, - } - with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: - rules = json.load(inp) - cvm = CustomVerdictMatcher(rules, test_framework_output) - custom_verdict = cvm.evaluate() - if None not in (verdict_rule_file, custom_verdict): - conclusion = custom_verdict["conclusion"] - verdict = custom_verdict["verdict"] - description = custom_verdict["description"] - self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) - elif executed: - conclusion = "SUCCESSFUL" - verdict = "PASSED" if result else "FAILED" - self.logger.info( - "Tests executed successfully. Verdict set to '%s' due to result being '%s'", - verdict, - result, - ) - else: - conclusion = "FAILED" - verdict = "INCONCLUSIVE" - self.logger.info( - "Tests did not execute successfully. Setting verdict to '%s'", - verdict, - ) - - suite_name = self.config.get("name") - if not description and not result: - self.logger.info("No description but result is a failure. At least some tests failed.") - description = f"At least some {suite_name} tests failed." - elif not description and result: - self.logger.info( - "No description and result is a success. All tests executed successfully." - ) - description = f"All {suite_name} tests completed successfully." - else: - self.logger.info("Description was set. 
Probably due to an exception.") - return { - "verdict": verdict, - "description": description, - "conclusion": conclusion, + test_framework_output = { + "test_framework_exit_codes": test_framework_exit_codes } + verdict = self.verdict_matcher.evaluate(test_framework_output) + verdict["detailed_description"] = detailed_description + return verdict def _test_suite_triggered(self, name): """Call on_test_suite_triggered for all ETR plugins. @@ -280,22 +190,17 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.environment(sub_suite_id) self.etos.config.set("sub_suite_id", sub_suite_id) - result = True description = None - executed = False test_framework_exit_code = None try: with Workspace(self.log_area) as workspace: self.logger.info("Start IUT monitoring.") self.iut_monitoring.start_monitoring() self.logger.info("Starting test executor.") - result, test_framework_exit_code = self.run_tests(workspace) - executed = True + test_framework_exit_code = self.run_tests(workspace) self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() except Exception as exception: # pylint:disable=broad-except - result = False - executed = False description = str(exception) raise finally: @@ -303,7 +208,7 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() self.logger.info("Figure out test outcome.") - outcome = self.outcome(result, executed, description, test_framework_exit_code) + outcome = self.outcome(description, test_framework_exit_code) pprint(outcome) self.logger.info("Send test suite finished event.") diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py new file mode 100644 index 0000000..72c2bc4 --- /dev/null +++ b/src/etos_test_runner/lib/verdict.py @@ -0,0 +1,156 @@ +# Copyright 2020-2021 Axis Communications AB. 
+# +# For a full list of individual contributors, please see the commit history. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Verdict-related classes and functions.""" +from typing import Union + +DEFAULT_RULES = [ + { + "description": "Executed, no errors", + "condition" : { + "test_framework_exit_codes": { + "match": "all", + "op": "eq", + "value": 0, + } + }, + "conclusion": "SUCCESSFUL", + "verdict": "PASSED", + }, + { + "description": "Executed with errors", + "condition" : { + "test_framework_exit_codes": { + "match": "some", + "op": "gte", + "value": 1, + }, + }, + "conclusion": "SUCCESSFUL", + "verdict": "FAILED", + }, + { + "description": "Abnormal termination due to an exception", + "condition" : { + "test_framework_exit_codes": { + "match": "some", + "op": "eq", + "value": None, + }, + }, + "conclusion": "INCONCLUSIVE", + "verdict": "FAILED", + }, +] + +class ConditionEvaluator: + """Evaluate a list of exit codes against a condition.""" + def __init__(self, exit_codes, condition): + self.exit_codes = exit_codes + self.condition = condition + + def evaluate(self) -> bool: + """Run evaluation.""" + match = self.condition.get("match") + op = self.condition.get("op") + value = self.condition.get("value") + if match == "all": + if not all(self._evaluate_expression(op, value, exit_code) for exit_code in self.exit_codes): + return False + elif match == "some": + if not any(self._evaluate_expression(op, value, exit_code) for exit_code in self.exit_codes): + 
return False + elif match == "none": + if any(self._evaluate_expression(op, value, exit_code) for exit_code in self.exit_codes): + return False + return True + + def _evaluate_expression(self, op, value, exit_code) -> bool: + """"Evaluate a single exit code against the condition.""" + if op == "eq": + return exit_code == value + elif op == "neq": + return exit_code != value + elif op == "gt": + return exit_code > value + elif op == "lt": + return exit_code < value + elif op == "gte": + return exit_code >= value + elif op == "lte": + return exit_code <= value + else: + raise ValueError(f"Unsupported operator: {op}") + + +class VerdictMatcher: + """Verdict matcher.""" + + REQUIRED_RULE_PARAMETERS = { + "description", + "condition", + "conclusion", + "verdict", + } + + SUPPORTED_CONDITION_KEYWORDS = { + "test_framework_exit_codes", + } + + SUPPORTED_EXPRESSION_OPERATORS = { + "eq", + "neq", + "gt", + "lt", + "gte", + "lte", + }, + SUPPORTED_EXPRESSION_MATCH_OPERATORS = { + "all", + "some", + "none", + } + + def __init__(self, rules: list = []): + self.rules = rules if rules else DEFAULT_RULES + for rule in self.rules: + # Make sure all rules have required parameters: + if set(rule.keys()) != self.REQUIRED_RULE_PARAMETERS: + raise ValueError( + f"Not all rule keywords are given in the rule: {rule}! " + f"Required keywords: {self.REQUIRED_RULE_PARAMETERS}." + ) + # Make sure the rule's condition is not empty + if len(rule["condition"].keys()) == 0: + raise ValueError(f"No keywords are given in the rule condition: {rule}") + + # For each expression of the condition: + for keyword, expression in rule["condition"].keys(): + + # All keywords shall be supported: + if keyword not in self.SUPPORTED_CONDITION_KEYWORDS: + raise ValueError( + f"Unsupported condition keyword for test outcome rules: {keyword}! " + f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." 
+ ) + + def evaluate(self, test_framework_output: dict) -> Union[dict, None]: + """Evaluate the list of given rules and return the first match.""" + for rule in self.rules: + ce = ConditionEvaluator(test_framework_output["test_framework_exit_codes"], rule["condition"]) + if ce.evaluate(): + return rule + return None + From 654d08807c36e006176f5575b21abe678bce6d81 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 07:42:22 +0200 Subject: [PATCH 04/13] Revert "VerdictMatcher draft" This reverts commit 5c7f0bcc19859ff55aba2d8ae90321d00602b0e5. --- src/etos_test_runner/lib/testrunner.py | 161 ++++++++++++++++++++----- src/etos_test_runner/lib/verdict.py | 156 ------------------------ 2 files changed, 128 insertions(+), 189 deletions(-) delete mode 100644 src/etos_test_runner/lib/verdict.py diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 3929f81..2dc522e 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -25,7 +25,62 @@ from etos_test_runner.lib.executor import Executor from etos_test_runner.lib.workspace import Workspace from etos_test_runner.lib.log_area import LogArea -from etos_test_runner.lib.verdict import VerdictMatcher + + +class CustomVerdictMatcher: + # pylint: disable=too-few-public-methods + """Match testframework output against user-defined verdict rules. 
+ + Example rule definition: + + rules = [ + { + "description": "Test collection error, no artifacts created", + "condition": { + "test_framework_exit_code": 4, + }, + "conclusion": "FAILED", + "verdict": "FAILED", + } + ] + """ + + SUPPORTED_CONDITION_KEYWORDS = [ + "test_framework_exit_code", + ] + + def __init__(self, rules: list, test_framework_output: dict) -> None: + """Create new instance.""" + self.rules = rules + self.test_framework_output = test_framework_output + + for rule in self.rules: + for key in rule["condition"].keys(): + if key not in self.SUPPORTED_CONDITION_KEYWORDS: + raise ValueError( + f"Unsupported condition keyword for test outcome rules: {key}! " + f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." + ) + + def _evaluate_rule(self, rule: dict) -> bool: + """Evaluate conditions within the given rule.""" + for kw, expected_value in rule["condition"].items(): + # logical AND: return False as soon as a false statement is encountered: + if ( + kw == "test_framework_exit_code" + and "test_framework_exit_code" in self.test_framework_output.keys() + ): + if self.test_framework_output["test_framework_exit_code"] != expected_value: + return False + # implement more keywords if needed + return True + + def evaluate(self) -> Union[dict, None]: + """Evaluate the list of given rules and return the first match.""" + for rule in self.rules: + if self._evaluate_rule(rule): + return rule + return None class TestRunner: @@ -50,13 +105,6 @@ def __init__(self, iut, etos): self.issuer = {"name": "ETOS Test Runner"} self.etos.config.set("iut", self.iut) self.plugins = self.etos.config.get("plugins") - rules = [] - if os.getenv("VERDICT_RULE_FILE"): - with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: - rules = json.load(inp) - self.verdict_matcher = VerdictMatcher(rules=rules) - else: - self.verdict_matcher = VerdictMatcher() def test_suite_started(self): """Publish a test suite started event. 
@@ -101,49 +149,91 @@ def environment(self, context): host={"name": os.getenv("EXECUTION_SPACE_URL"), "user": "etos"}, ) - def run_tests(self, workspace: Workspace) -> list[Union[int, None]]: + def run_tests(self, workspace: Workspace) -> tuple[bool, int]: """Execute test recipes within a test executor. :param workspace: Which workspace to execute test suite within. :type workspace: :obj:`etr.lib.workspace.Workspace` - :return: List of test framework exit codes for each recipe. - :rtype: list of int or None instances + :return: Result of test execution. + :rtype: bool """ recipes = self.config.get("recipes") - test_framework_exit_codes = [] + result = True for num, test in enumerate(recipes): self.logger.info("Executing test %s/%s", num + 1, len(recipes)) with Executor(test, self.iut, self.etos) as executor: self.logger.info("Starting test '%s'", executor.test_name) executor.execute(workspace) - if executor.returncode is not None: - self.logger.info( - "Test finished. Test framework exit code: %d", - executor.returncode, - ) - else: - self.logger.info("Test finished. Test framework exit code is None.") - test_framework_exit_codes.append(executor.returncode) - return executor.returncode + if not executor.result: + result = executor.result + self.logger.info( + "Test finished. Result: %s. Test framework exit code: %d", + executor.result, + executor.returncode, + ) + return result, executor.returncode def outcome( - self, detailed_description: str, test_framework_exit_codes: list[Union[int, None]] + self, result: bool, executed: bool, description: str, test_framework_exit_code: int ) -> dict: """Get outcome from test execution. - :param detailed_description: Optional detailed description. - :type detailed_description: str - :test_framework_exit_codes: list of exit codes for each recipe - : + :param result: Result of execution. + :type result: bool + :param executed: Whether or not tests have successfully executed. 
+ :type executed: bool + :param description: Optional description. + :type description: str :return: Outcome of test execution. :rtype: dict """ - test_framework_output = { - "test_framework_exit_codes": test_framework_exit_codes + verdict_rule_file = os.getenv("VERDICT_RULE_FILE") + custom_verdict = None + if verdict_rule_file is not None: + test_framework_output = { + "test_framework_exit_code": test_framework_exit_code, + } + with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: + rules = json.load(inp) + cvm = CustomVerdictMatcher(rules, test_framework_output) + custom_verdict = cvm.evaluate() + if None not in (verdict_rule_file, custom_verdict): + conclusion = custom_verdict["conclusion"] + verdict = custom_verdict["verdict"] + description = custom_verdict["description"] + self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) + elif executed: + conclusion = "SUCCESSFUL" + verdict = "PASSED" if result else "FAILED" + self.logger.info( + "Tests executed successfully. Verdict set to '%s' due to result being '%s'", + verdict, + result, + ) + else: + conclusion = "FAILED" + verdict = "INCONCLUSIVE" + self.logger.info( + "Tests did not execute successfully. Setting verdict to '%s'", + verdict, + ) + + suite_name = self.config.get("name") + if not description and not result: + self.logger.info("No description but result is a failure. At least some tests failed.") + description = f"At least some {suite_name} tests failed." + elif not description and result: + self.logger.info( + "No description and result is a success. All tests executed successfully." + ) + description = f"All {suite_name} tests completed successfully." + else: + self.logger.info("Description was set. 
Probably due to an exception.") + return { + "verdict": verdict, + "description": description, + "conclusion": conclusion, } - verdict = self.verdict_matcher.evaluate(test_framework_output) - verdict["detailed_description"] = detailed_description - return verdict def _test_suite_triggered(self, name): """Call on_test_suite_triggered for all ETR plugins. @@ -190,17 +280,22 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.environment(sub_suite_id) self.etos.config.set("sub_suite_id", sub_suite_id) + result = True description = None + executed = False test_framework_exit_code = None try: with Workspace(self.log_area) as workspace: self.logger.info("Start IUT monitoring.") self.iut_monitoring.start_monitoring() self.logger.info("Starting test executor.") - test_framework_exit_code = self.run_tests(workspace) + result, test_framework_exit_code = self.run_tests(workspace) + executed = True self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() except Exception as exception: # pylint:disable=broad-except + result = False + executed = False description = str(exception) raise finally: @@ -208,7 +303,7 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() self.logger.info("Figure out test outcome.") - outcome = self.outcome(description, test_framework_exit_code) + outcome = self.outcome(result, executed, description, test_framework_exit_code) pprint(outcome) self.logger.info("Send test suite finished event.") diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py deleted file mode 100644 index 72c2bc4..0000000 --- a/src/etos_test_runner/lib/verdict.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2020-2021 Axis Communications AB. -# -# For a full list of individual contributors, please see the commit history. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Verdict-related classes and functions.""" -from typing import Union - -DEFAULT_RULES = [ - { - "description": "Executed, no errors", - "condition" : { - "test_framework_exit_codes": { - "match": "all", - "op": "eq", - "value": 0, - } - }, - "conclusion": "SUCCESSFUL", - "verdict": "PASSED", - }, - { - "description": "Executed with errors", - "condition" : { - "test_framework_exit_codes": { - "match": "some", - "op": "gte", - "value": 1, - }, - }, - "conclusion": "SUCCESSFUL", - "verdict": "FAILED", - }, - { - "description": "Abnormal termination due to an exception", - "condition" : { - "test_framework_exit_codes": { - "match": "some", - "op": "eq", - "value": None, - }, - }, - "conclusion": "INCONCLUSIVE", - "verdict": "FAILED", - }, -] - -class ConditionEvaluator: - """Evaluate a list of exit codes against a condition.""" - def __init__(self, exit_codes, condition): - self.exit_codes = exit_codes - self.condition = condition - - def evaluate(self) -> bool: - """Run evaluation.""" - match = self.condition.get("match") - op = self.condition.get("op") - value = self.condition.get("value") - if match == "all": - if not all(self._evaluate_expression(op, value, exit_code) for exit_code in self.exit_codes): - return False - elif match == "some": - if not any(self._evaluate_expression(op, value, exit_code) for exit_code in self.exit_codes): - return False - elif match == "none": - if any(self._evaluate_expression(op, value, 
exit_code) for exit_code in self.exit_codes): - return False - return True - - def _evaluate_expression(self, op, value, exit_code) -> bool: - """"Evaluate a single exit code against the condition.""" - if op == "eq": - return exit_code == value - elif op == "neq": - return exit_code != value - elif op == "gt": - return exit_code > value - elif op == "lt": - return exit_code < value - elif op == "gte": - return exit_code >= value - elif op == "lte": - return exit_code <= value - else: - raise ValueError(f"Unsupported operator: {op}") - - -class VerdictMatcher: - """Verdict matcher.""" - - REQUIRED_RULE_PARAMETERS = { - "description", - "condition", - "conclusion", - "verdict", - } - - SUPPORTED_CONDITION_KEYWORDS = { - "test_framework_exit_codes", - } - - SUPPORTED_EXPRESSION_OPERATORS = { - "eq", - "neq", - "gt", - "lt", - "gte", - "lte", - }, - SUPPORTED_EXPRESSION_MATCH_OPERATORS = { - "all", - "some", - "none", - } - - def __init__(self, rules: list = []): - self.rules = rules if rules else DEFAULT_RULES - for rule in self.rules: - # Make sure all rules have required parameters: - if set(rule.keys()) != self.REQUIRED_RULE_PARAMETERS: - raise ValueError( - f"Not all rule keywords are given in the rule: {rule}! " - f"Required keywords: {self.REQUIRED_RULE_PARAMETERS}." - ) - # Make sure the rule's condition is not empty - if len(rule["condition"].keys()) == 0: - raise ValueError(f"No keywords are given in the rule condition: {rule}") - - # For each expression of the condition: - for keyword, expression in rule["condition"].keys(): - - # All keywords shall be supported: - if keyword not in self.SUPPORTED_CONDITION_KEYWORDS: - raise ValueError( - f"Unsupported condition keyword for test outcome rules: {keyword}! " - f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." 
- ) - - def evaluate(self, test_framework_output: dict) -> Union[dict, None]: - """Evaluate the list of given rules and return the first match.""" - for rule in self.rules: - ce = ConditionEvaluator(test_framework_output["test_framework_exit_codes"], rule["condition"]) - if ce.evaluate(): - return rule - return None - From 84524150b1d3be417c08f97c245a41d671092568 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 12:25:12 +0200 Subject: [PATCH 05/13] code review changes --- src/etos_test_runner/lib/testrunner.py | 122 +++++++++---------------- 1 file changed, 45 insertions(+), 77 deletions(-) diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 2dc522e..4971fa0 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -25,63 +25,7 @@ from etos_test_runner.lib.executor import Executor from etos_test_runner.lib.workspace import Workspace from etos_test_runner.lib.log_area import LogArea - - -class CustomVerdictMatcher: - # pylint: disable=too-few-public-methods - """Match testframework output against user-defined verdict rules. - - Example rule definition: - - rules = [ - { - "description": "Test collection error, no artifacts created", - "condition": { - "test_framework_exit_code": 4, - }, - "conclusion": "FAILED", - "verdict": "FAILED", - } - ] - """ - - SUPPORTED_CONDITION_KEYWORDS = [ - "test_framework_exit_code", - ] - - def __init__(self, rules: list, test_framework_output: dict) -> None: - """Create new instance.""" - self.rules = rules - self.test_framework_output = test_framework_output - - for rule in self.rules: - for key in rule["condition"].keys(): - if key not in self.SUPPORTED_CONDITION_KEYWORDS: - raise ValueError( - f"Unsupported condition keyword for test outcome rules: {key}! " - f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}." 
- ) - - def _evaluate_rule(self, rule: dict) -> bool: - """Evaluate conditions within the given rule.""" - for kw, expected_value in rule["condition"].items(): - # logical AND: return False as soon as a false statement is encountered: - if ( - kw == "test_framework_exit_code" - and "test_framework_exit_code" in self.test_framework_output.keys() - ): - if self.test_framework_output["test_framework_exit_code"] != expected_value: - return False - # implement more keywords if needed - return True - - def evaluate(self) -> Union[dict, None]: - """Evaluate the list of given rules and return the first match.""" - for rule in self.rules: - if self._evaluate_rule(rule): - return rule - return None - +from etos_test_runner.lib.verdict import CustomVerdictMatcher class TestRunner: """Test runner for ETOS.""" @@ -106,6 +50,30 @@ def __init__(self, iut, etos): self.etos.config.set("iut", self.iut) self.plugins = self.etos.config.get("plugins") + verdict_rule_file = os.getenv("VERDICT_RULE_FILE") + if verdict_rule_file is not None: + with open(verdict_rule_file, "r", encoding="utf-8") as inp: + rules = json.load(inp) + else: + rules = [] + + # debug + rules = [ + { + "description": "Test execution interrupted by the user", + "condition": { + "test_framework_exit_code": 0 + }, + "conclusion": "ABORTED", + "verdict": "FAILED" + } + ] + # end debug + + self.verdict_matcher = CustomVerdictMatcher(rules) + self.test_framework_exit_codes = [] + + def test_suite_started(self): """Publish a test suite started event. @@ -149,7 +117,7 @@ def environment(self, context): host={"name": os.getenv("EXECUTION_SPACE_URL"), "user": "etos"}, ) - def run_tests(self, workspace: Workspace) -> tuple[bool, int]: + def run_tests(self, workspace: Workspace) -> tuple[bool, list[Union[int, None]]]: """Execute test recipes within a test executor. :param workspace: Which workspace to execute test suite within. 
@@ -159,6 +127,7 @@ def run_tests(self, workspace: Workspace) -> tuple[bool, int]: """ recipes = self.config.get("recipes") result = True + test_framework_exit_codes = [] for num, test in enumerate(recipes): self.logger.info("Executing test %s/%s", num + 1, len(recipes)) with Executor(test, self.iut, self.etos) as executor: @@ -171,10 +140,11 @@ def run_tests(self, workspace: Workspace) -> tuple[bool, int]: executor.result, executor.returncode, ) - return result, executor.returncode + test_framework_exit_codes.append(executor.returncode) + return result, test_framework_exit_codes def outcome( - self, result: bool, executed: bool, description: str, test_framework_exit_code: int + self, result: bool, executed: bool, description: str, test_framework_exit_codes: list[Union[int, None]] ) -> dict: """Get outcome from test execution. @@ -187,20 +157,18 @@ def outcome( :return: Outcome of test execution. :rtype: dict """ - verdict_rule_file = os.getenv("VERDICT_RULE_FILE") - custom_verdict = None - if verdict_rule_file is not None: - test_framework_output = { - "test_framework_exit_code": test_framework_exit_code, - } - with open(os.getenv("VERDICT_RULE_FILE"), "r", encoding="utf-8") as inp: - rules = json.load(inp) - cvm = CustomVerdictMatcher(rules, test_framework_output) - custom_verdict = cvm.evaluate() - if None not in (verdict_rule_file, custom_verdict): - conclusion = custom_verdict["conclusion"] - verdict = custom_verdict["verdict"] - description = custom_verdict["description"] + test_framework_output = { + "test_framework_exit_codes": test_framework_exit_codes, + } + custom_verdict = self.verdict_matcher.evaluate(test_framework_output) + if custom_verdict is not None: + try: + conclusion = custom_verdict["conclusion"] + verdict = custom_verdict["verdict"] + description = custom_verdict["description"] + except KeyError as err: + raise ValueError(f"Malformed entry in the verdict rule file: {custom_verdict}. 
" + "Expected attributes: description, condition, conclusion, verdict.") from err self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) elif executed: conclusion = "SUCCESSFUL" @@ -283,13 +251,13 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen result = True description = None executed = False - test_framework_exit_code = None + test_framework_exit_codes = [] try: with Workspace(self.log_area) as workspace: self.logger.info("Start IUT monitoring.") self.iut_monitoring.start_monitoring() self.logger.info("Starting test executor.") - result, test_framework_exit_code = self.run_tests(workspace) + result, test_framework_exit_codes = self.run_tests(workspace) executed = True self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() @@ -303,7 +271,7 @@ def execute(self): # pylint:disable=too-many-branches,disable=too-many-statemen self.logger.info("Stop IUT monitoring.") self.iut_monitoring.stop_monitoring() self.logger.info("Figure out test outcome.") - outcome = self.outcome(result, executed, description, test_framework_exit_code) + outcome = self.outcome(result, executed, description, test_framework_exit_codes) pprint(outcome) self.logger.info("Send test suite finished event.") From c31e135af4e60f74342055930c1e9e0f0a019dff Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 12:50:01 +0200 Subject: [PATCH 06/13] code review changes --- src/etos_test_runner/lib/verdict.py | 76 +++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 src/etos_test_runner/lib/verdict.py diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py new file mode 100644 index 0000000..57eb92b --- /dev/null +++ b/src/etos_test_runner/lib/verdict.py @@ -0,0 +1,76 @@ +# Copyright 2020-2021 Axis Communications AB. +# +# For a full list of individual contributors, please see the commit history. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Verdict module."""
+from typing import Union
+
+
+class CustomVerdictMatcher:
+    # pylint: disable=too-few-public-methods
+    """Match testframework output against user-defined verdict rules.
+
+    Example rule definition:
+
+    rules = [
+        {
+            "description": "Test collection error, no artifacts created",
+            "condition": {
+                "test_framework_exit_code": 4,
+            },
+            "conclusion": "FAILED",
+            "verdict": "FAILED",
+        }
+    ]
+
+    Condition keywords:
+    - test_framework_exit_code: allows setting a custom verdict if the given exit code is
+      found in the list exit codes produced by the test framework.
+    """
+
+    SUPPORTED_CONDITION_KEYWORDS = [
+        "test_framework_exit_code",
+    ]
+
+    def __init__(self, rules: list) -> None:
+        """Create new instance."""
+        self.rules = rules
+        for rule in self.rules:
+            for key in rule["condition"].keys():
+                if key not in self.SUPPORTED_CONDITION_KEYWORDS:
+                    raise ValueError(
+                        f"Unsupported condition keyword for test outcome rules: {key}! "
+                        f"Supported keywords: {self.SUPPORTED_CONDITION_KEYWORDS}."
+                    )
+
+    def _evaluate_rule(self, rule: dict, test_framework_output: dict) -> bool:
+        """Evaluate conditions within the given rule."""
+        for kw, expected_value in rule["condition"].items():
+            # If the condition has multiple expressions, they are implicitly joined using logical AND:
+            # i. e. all shall evaluate to True in order for the condition to be True.
+ # False is returned as soon as a false statement is encountered. + if kw == "test_framework_exit_code": + # If the exit code given by the condition is found in the list of produced exit codes, + # the rule will evaluate as True. + if expected_value not in test_framework_output.get("test_framework_exit_codes"): + return False + return True + + def evaluate(self, , test_framework_output: dict) -> Union[dict, None]: + """Evaluate the list of given rules and return the first match.""" + for rule in self.rules: + if self._evaluate_rule(rule, test_framework_output): + return rule + return None + From 4e9956b6ee0d738972c662b251c50d3db1271cb4 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 12:53:33 +0200 Subject: [PATCH 07/13] code review changes --- src/etos_test_runner/lib/verdict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index 57eb92b..3d496db 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -67,7 +67,7 @@ def _evaluate_rule(self, rule: dict, test_framework_output: dict) -> bool: return False return True - def evaluate(self, , test_framework_output: dict) -> Union[dict, None]: + def evaluate(self, test_framework_output: dict) -> Union[dict, None]: """Evaluate the list of given rules and return the first match.""" for rule in self.rules: if self._evaluate_rule(rule, test_framework_output): From bce67fd36d3587b34e06f222715b7087edfe287c Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 13:01:44 +0200 Subject: [PATCH 08/13] code review changes --- src/etos_test_runner/lib/testrunner.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 4971fa0..2ded4cf 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -57,19 +57,6 @@ def __init__(self, iut, 
etos): else: rules = [] - # debug - rules = [ - { - "description": "Test execution interrupted by the user", - "condition": { - "test_framework_exit_code": 0 - }, - "conclusion": "ABORTED", - "verdict": "FAILED" - } - ] - # end debug - self.verdict_matcher = CustomVerdictMatcher(rules) self.test_framework_exit_codes = [] From 3fc3b26fdeec3a6042758c499794368a800e64a3 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Fri, 7 Jun 2024 13:25:15 +0200 Subject: [PATCH 09/13] code review changes --- src/etos_test_runner/lib/testrunner.py | 17 ++++++++++++----- src/etos_test_runner/lib/verdict.py | 10 +++++----- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index 2ded4cf..a085833 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -27,9 +27,12 @@ from etos_test_runner.lib.log_area import LogArea from etos_test_runner.lib.verdict import CustomVerdictMatcher + class TestRunner: """Test runner for ETOS.""" + # pylint: disable=too-many-instance-attributes + logger = logging.getLogger("ETR") def __init__(self, iut, etos): @@ -58,8 +61,6 @@ def __init__(self, iut, etos): rules = [] self.verdict_matcher = CustomVerdictMatcher(rules) - self.test_framework_exit_codes = [] - def test_suite_started(self): """Publish a test suite started event. @@ -131,7 +132,11 @@ def run_tests(self, workspace: Workspace) -> tuple[bool, list[Union[int, None]]] return result, test_framework_exit_codes def outcome( - self, result: bool, executed: bool, description: str, test_framework_exit_codes: list[Union[int, None]] + self, + result: bool, + executed: bool, + description: str, + test_framework_exit_codes: list[Union[int, None]], ) -> dict: """Get outcome from test execution. 
@@ -154,8 +159,10 @@ def outcome( verdict = custom_verdict["verdict"] description = custom_verdict["description"] except KeyError as err: - raise ValueError(f"Malformed entry in the verdict rule file: {custom_verdict}. " - "Expected attributes: description, condition, conclusion, verdict.") from err + raise ValueError( + f"Malformed entry in the verdict rule file: {custom_verdict}. " + "Expected attributes: description, condition, conclusion, verdict." + ) from err self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) elif executed: conclusion = "SUCCESSFUL" diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index 3d496db..39516c1 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -57,12 +57,13 @@ def __init__(self, rules: list) -> None: def _evaluate_rule(self, rule: dict, test_framework_output: dict) -> bool: """Evaluate conditions within the given rule.""" for kw, expected_value in rule["condition"].items(): - # If the condition has multiple expressions, they are implicitly joined using logical AND: - # i. e. all shall evaluate to True in order for the condition to be True. + # If the condition has multiple expressions, they are implicitly + # joined using logical AND: i. e. all shall evaluate to True + # in order for the condition to be True. # False is returned as soon as a false statement is encountered. if kw == "test_framework_exit_code": - # If the exit code given by the condition is found in the list of produced exit codes, - # the rule will evaluate as True. + # If the exit code given by the condition is found in + # the list of produced exit codes, the rule will evaluate as True. 
if expected_value not in test_framework_output.get("test_framework_exit_codes"): return False return True @@ -73,4 +74,3 @@ def evaluate(self, test_framework_output: dict) -> Union[dict, None]: if self._evaluate_rule(rule, test_framework_output): return rule return None - From f54e369336d6e3e8a5e548e96623b3cc92828c89 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Thu, 13 Jun 2024 10:51:01 +0200 Subject: [PATCH 10/13] Rule validation fix added. Copyright years removed. --- src/etos_test_runner/lib/testrunner.py | 12 +++--------- src/etos_test_runner/lib/verdict.py | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/etos_test_runner/lib/testrunner.py b/src/etos_test_runner/lib/testrunner.py index a085833..41022d3 100644 --- a/src/etos_test_runner/lib/testrunner.py +++ b/src/etos_test_runner/lib/testrunner.py @@ -154,15 +154,9 @@ def outcome( } custom_verdict = self.verdict_matcher.evaluate(test_framework_output) if custom_verdict is not None: - try: - conclusion = custom_verdict["conclusion"] - verdict = custom_verdict["verdict"] - description = custom_verdict["description"] - except KeyError as err: - raise ValueError( - f"Malformed entry in the verdict rule file: {custom_verdict}. " - "Expected attributes: description, condition, conclusion, verdict." - ) from err + conclusion = custom_verdict["conclusion"] + verdict = custom_verdict["verdict"] + description = custom_verdict["description"] self.logger.info("Verdict matches testrunner verdict rule: %s", custom_verdict) elif executed: conclusion = "SUCCESSFUL" diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index 39516c1..abf2dd2 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -1,4 +1,4 @@ -# Copyright 2020-2021 Axis Communications AB. +# Copyright Axis Communications AB. # # For a full list of individual contributors, please see the commit history. 
# @@ -39,14 +39,25 @@ class CustomVerdictMatcher: found in the list exit codes produced by the test framework. """ - SUPPORTED_CONDITION_KEYWORDS = [ + REQUIRED_RULE_KEYWORDS = { + "description", + "condition", + "conclusion", + "verdict", + } + SUPPORTED_CONDITION_KEYWORDS = { "test_framework_exit_code", - ] + } def __init__(self, rules: list) -> None: """Create new instance.""" self.rules = rules for rule in self.rules: + if rule.keys() != self.REQUIRED_RULE_KEYWORDS: + raise ValueError( + f"Unsupported rule definition: {rule}. " + "Required keywords: {self.REQUIRED_RULE_KEYWORDS}" + ) for key in rule["condition"].keys(): if key not in self.SUPPORTED_CONDITION_KEYWORDS: raise ValueError( From 254a0f24e501003ac2d94101f29ae95a8d691225 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Thu, 13 Jun 2024 10:56:32 +0200 Subject: [PATCH 11/13] black formatting fix --- src/etos_test_runner/lib/verdict.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index abf2dd2..79864a8 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -1,4 +1,4 @@ -# Copyright Axis Communications AB. +# Copyright 1Axis Communications AB. # # For a full list of individual contributors, please see the commit history. 
# @@ -40,10 +40,10 @@ class CustomVerdictMatcher: """ REQUIRED_RULE_KEYWORDS = { - "description", - "condition", - "conclusion", - "verdict", + "description", + "condition", + "conclusion", + "verdict", } SUPPORTED_CONDITION_KEYWORDS = { "test_framework_exit_code", From 4742fb4ab49d09e0d9d4751c9c6aea4ec303ff78 Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Thu, 13 Jun 2024 14:28:17 +0200 Subject: [PATCH 12/13] set equality fix --- src/etos_test_runner/lib/verdict.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index 79864a8..972d3c9 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -53,10 +53,10 @@ def __init__(self, rules: list) -> None: """Create new instance.""" self.rules = rules for rule in self.rules: - if rule.keys() != self.REQUIRED_RULE_KEYWORDS: + if set(rule.keys()) != self.REQUIRED_RULE_KEYWORDS: raise ValueError( f"Unsupported rule definition: {rule}. " - "Required keywords: {self.REQUIRED_RULE_KEYWORDS}" + f"Required keywords: {self.REQUIRED_RULE_KEYWORDS}" ) for key in rule["condition"].keys(): if key not in self.SUPPORTED_CONDITION_KEYWORDS: From 13c3926775d3051440537e75784c0f76064dbcaf Mon Sep 17 00:00:00 2001 From: Andrei Matveyeu Date: Mon, 8 Jul 2024 10:11:55 +0200 Subject: [PATCH 13/13] typo fix --- src/etos_test_runner/lib/verdict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/etos_test_runner/lib/verdict.py b/src/etos_test_runner/lib/verdict.py index 972d3c9..90557f0 100644 --- a/src/etos_test_runner/lib/verdict.py +++ b/src/etos_test_runner/lib/verdict.py @@ -1,4 +1,4 @@ -# Copyright 1Axis Communications AB. +# Copyright Axis Communications AB. # # For a full list of individual contributors, please see the commit history. #