From a70fbc314d2dd6bda9b6659eed52d6c1901b9fb7 Mon Sep 17 00:00:00 2001 From: Chanyoung So Date: Wed, 15 Jun 2022 11:10:56 +0200 Subject: [PATCH 01/26] gallia-analyze initial commit --- src/gallia/analyzer/__init__.py | 1 + src/gallia/analyzer/analyzer.py | 461 ++++++++++++++ src/gallia/analyzer/arg_help.py | 94 +++ src/gallia/analyzer/categorizer.py | 388 ++++++++++++ src/gallia/analyzer/config.py | 134 ++++ src/gallia/analyzer/constants.py | 30 + src/gallia/analyzer/db_handler.py | 194 ++++++ src/gallia/analyzer/exceptions.py | 24 + src/gallia/analyzer/extractor.py | 192 ++++++ src/gallia/analyzer/failure.py | 135 ++++ src/gallia/analyzer/iso_def.py | 6 + src/gallia/analyzer/json/conditions.json | 36 ++ src/gallia/analyzer/json/responses.json | 63 ++ .../analyzer/json/uds_iso_standard.json | 28 + src/gallia/analyzer/main.py | 158 +++++ src/gallia/analyzer/mode_config.py | 37 ++ src/gallia/analyzer/name_config.py | 129 ++++ src/gallia/analyzer/naming_conventions.txt | 53 ++ src/gallia/analyzer/operator.py | 581 ++++++++++++++++++ src/gallia/analyzer/reporter.py | 360 +++++++++++ src/gallia/analyzer/time_analyzer.py | 240 ++++++++ src/gallia/analyzer/xl_generator.py | 544 ++++++++++++++++ 22 files changed, 3888 insertions(+) create mode 100644 src/gallia/analyzer/__init__.py create mode 100644 src/gallia/analyzer/analyzer.py create mode 100644 src/gallia/analyzer/arg_help.py create mode 100644 src/gallia/analyzer/categorizer.py create mode 100644 src/gallia/analyzer/config.py create mode 100644 src/gallia/analyzer/constants.py create mode 100644 src/gallia/analyzer/db_handler.py create mode 100644 src/gallia/analyzer/exceptions.py create mode 100644 src/gallia/analyzer/extractor.py create mode 100644 src/gallia/analyzer/failure.py create mode 100644 src/gallia/analyzer/iso_def.py create mode 100644 src/gallia/analyzer/json/conditions.json create mode 100644 src/gallia/analyzer/json/responses.json create mode 100644 src/gallia/analyzer/json/uds_iso_standard.json create mode 100755 src/gallia/analyzer/main.py create mode 100644 src/gallia/analyzer/mode_config.py create mode 100644 src/gallia/analyzer/name_config.py create mode 100644 src/gallia/analyzer/naming_conventions.txt create mode 100644 src/gallia/analyzer/operator.py create mode 100644 src/gallia/analyzer/reporter.py create mode 100644 src/gallia/analyzer/time_analyzer.py create mode 100644 src/gallia/analyzer/xl_generator.py diff --git a/src/gallia/analyzer/__init__.py b/src/gallia/analyzer/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/src/gallia/analyzer/__init__.py @@ -0,0 +1 @@ + diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py new file mode 100644 index 000000000..17dabc6a2 --- /dev/null +++ b/src/gallia/analyzer/analyzer.py @@ -0,0 +1,461 @@ +""" +gallia-analyze Analyzer module +""" +import os +import json +from json.decoder import JSONDecodeError +from sqlite3 import OperationalError +import textwrap +from typing import Tuple +import numpy as np +from pandas.core.indexing import IndexingError +from gallia.analyzer.operator import Operator +from gallia.analyzer.config import SrcPath +from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode +from gallia.analyzer.name_config import ColNm, KyNm, TblNm, VwNm, NEG_STR + +if __name__ == "__main__": + exit() + + +class Analyzer(Operator): + """ + Analyzer class for categorizing failures(undocumented, missing) + at each scan mode(scan_service, scan_identifier) + and operation mode(ISO or vendor-specific). 
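+
+    A minimal usage sketch (illustrative only; the database path is an
+    assumed example):
+
+        analyzer = Analyzer(path="./scan_db.sqlite")
+        analyzer.analyze(np.array([1, 2]), op_mode=OpMode.VEN_SPEC)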
+    """
+
+    def __init__(
+        self,
+        path: str = "",
+        log_mode: LogMode = LogMode.STD_OUT,
+        debug_on: bool = False,
+    ):
+        Operator.__init__(self, path, log_mode)
+        self.msg_head = "[Analyzer] "
+        self.debug_on = debug_on
+
+    def analyze(self, runs_vec: np.ndarray, op_mode: OpMode = OpMode.VEN_SPEC) -> bool:
+        """
+        analyze the given input runs at a given operation mode.
+        """
+        if not self.load_meta(force=True):
+            return False
+        if op_mode == OpMode.VEN_SPEC:
+            if not self.load_ven_lu():
+                return False
+            if not self.load_ven_sess():
+                return False
+        for run in runs_vec:
+            self.analyze_each_run(run, op_mode)
+        return True
+
+    def analyze_each_run(self, run: int, op_mode: OpMode) -> bool:
+        """
+        analyze a certain run at a given operation mode.
+        """
+        self.log(f"analyzing run #{str(run)} from {self.db_path} ...")
+        scan_mode = self.get_scan_mode(run)
+        if scan_mode == ScanMode.SERV:
+            if not self.reset(TblNm.serv, run):
+                return False
+            return self.analyze_serv(run, op_mode)
+        if scan_mode == ScanMode.IDEN:
+            if not self.reset(TblNm.iden, run):
+                return False
+            return self.analyze_iden(run, op_mode)
+        return False
+
+    def reset(self, table_name: str, run: int) -> bool:
+        """
+        reset analysis results in a relational table in the database.
+        """
+        reset_sql = f"""
+        UPDATE "{table_name}" SET "{ColNm.fail}" = 255 WHERE "{ColNm.run}" = {str(run)};
+        """
+        try:
+            self.cur.executescript(reset_sql)
+            self.con.commit()
+        except (OperationalError, FileNotFoundError, KeyError) as exc:
+            self.log("resetting analysis in place failed", True, exc)
+            return False
+        return True
+
+    def analyze_serv(self, run: int, op_mode: OpMode) -> bool:
+        """
+        analyze scan_service result data for a certain run at a given operation mode,
+        operating directly in the database without using a data frame.
+        """
+        try:
+            ecu_mode = self.get_ecu_mode(run)
+            self.prepare_alwd_all(ecu_mode, op_mode)
+            with open(SrcPath.cond_src, encoding="utf8") as source_json:
+                cond_ls = json.load(source_json)
+            analyze_sql = ""
+            for cond_dict in cond_ls:
+                try:
+                    if cond_dict[KyNm.scan_mode] == KyNm.scan_serv:
+                        fail, cond = self.interpret(cond_dict, op_mode, ScanMode.SERV)
+                        update_sql = (
+                            f"""UPDATE "{TblNm.serv}" """
+                            + f"""SET "{ColNm.fail}" = {fail} """
+                            + f"""WHERE "{ColNm.run}" = {str(run)} """
+                            + f"""AND "{ColNm.fail}" = 255{cond};\n"""
+                        )
+                        analyze_sql += update_sql
+                except KeyError as exc:
+                    self.log("condition key reading failed", True, exc)
+            if self.debug_on:
+                if not os.path.isdir("debug"):
+                    os.mkdir("debug")
+                with open(
+                    f"./debug/analyze_serv_{str(run)}.sql", "w", encoding="utf8"
+                ) as file:
+                    file.write(analyze_sql)
+            self.cur.executescript(analyze_sql)
+            self.con.commit()
+            self.clear_alwd()
+        except (
+            OperationalError,
+            FileNotFoundError,
+            KeyError,
+            IndexingError,
+            AttributeError,
+            JSONDecodeError,
+        ) as exc:
+            self.log("analyzing scan_service in place failed", True, exc)
+            return False
+        return True
+
+    def analyze_iden(self, run: int, op_mode: OpMode) -> bool:
+        """
+        analyze scan_identifier result data for a certain run at a given operation mode,
+        operating directly in the database without using a data frame.
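+
+        Emits one UPDATE statement per matching condition, of the shape
+        (illustrative; actual table and column names come from TblNm/ColNm):
+
+            UPDATE "<iden table>" SET "<fail col>" = <failure>
+            WHERE "<run col>" = <run> AND "<fail col>" = 255 <condition>;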
+ """ + if op_mode == OpMode.ISO: + self.log("ISO Standard analysis unavailable for scan_identifier.", True) + return False + self.prepare_alwd_res() + try: + with open(SrcPath.cond_src, encoding="utf8") as src_json: + cond_ls = json.load(src_json) + serv = self.get_sid(run) + if serv == -1: + return False + create_view_sql = f""" + DROP VIEW IF EXISTS "{VwNm.sess_alwd}"; + CREATE VIEW "{VwNm.sess_alwd}" + AS SELECT "{ColNm.sess}" + FROM "{TblNm.ven_lu}" + WHERE "{ColNm.serv}" = {serv} + GROUP BY "{ColNm.sess}"; + DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}"; + CREATE VIEW "{VwNm.sbfn_alwd}" + AS SELECT "{ColNm.sbfn}" + FROM "{TblNm.ven_lu}" + WHERE "{ColNm.serv}" = {serv} + GROUP BY "{ColNm.sbfn}"; + DROP VIEW IF EXISTS "{VwNm.resp_alwd}"; + CREATE VIEW "{VwNm.resp_alwd}" + AS SELECT "{ColNm.resp}" + FROM "{TblNm.ref_resp}" + WHERE "{ColNm.serv}" = {serv} + GROUP BY "{ColNm.resp}"; + DROP VIEW IF EXISTS "{VwNm.ref_vw}"; + CREATE VIEW "{VwNm.ref_vw}" + AS SELECT "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}", + "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.ecu_mode}" + FROM "{TblNm.ven_lu}" WHERE "{ColNm.serv}" = {serv}; + """ + analyze_sql = textwrap.dedent(create_view_sql) + "\n" + for cond_dict in cond_ls: + try: + if cond_dict[KyNm.scan_mode] == KyNm.scan_iden: + fail, cond = self.interpret(cond_dict, op_mode, ScanMode.IDEN) + update_sql = ( + f"""UPDATE "{TblNm.iden}" """ + + f"""SET "{ColNm.fail}" = {fail} """ + + f"""WHERE "{ColNm.run}" = {str(run)} """ + + f"""AND "{ColNm.fail}" = 255{cond};\n""" + ) + analyze_sql += update_sql + else: + pass + except (KeyError) as exc: + self.log("condition key reading failed", True, exc) + drop_view_sql = f""" + DROP VIEW IF EXISTS "{VwNm.sess_alwd}"; + DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}"; + DROP VIEW IF EXISTS "{VwNm.resp_alwd}"; + DROP VIEW IF EXISTS "{VwNm.ref_vw}"; + """ + analyze_sql += textwrap.dedent(drop_view_sql) + "\n" + if self.debug_on: + if not os.path.isdir("debug"): + os.mkdir("debug") + with open( + f"./debug/analyze_iden_{str(run)}.sql", "w", encoding="utf8" + ) as file: + file.write(analyze_sql) + self.cur.executescript(analyze_sql) + self.con.commit() + self.clear_alwd() + except ( + OperationalError, + FileNotFoundError, + KeyError, + IndexingError, + AttributeError, + JSONDecodeError, + ) as exc: + self.log("analyzing scan_identifier in place failed", True, exc) + return False + return True + + def interpret( + self, + cond_dict: dict, + op_mode: OpMode, + scan_mode: ScanMode, + ) -> Tuple[int, str]: + """ + interpret JSON conditions file and get failure condition partial SQL string. 
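+
+        Example (illustrative): a conditions.json entry such as
+
+            {"SCAN MODE": "scan-service", "FAILURE": "OK_SERV", "RESPONDED": [0]}
+
+        maps to the numeric code of OK_SERV plus a partial SQL condition
+        like ' AND "<resp col>" IN (0)'.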
+ """ + cond = "" + try: + failure = self.fail_name_dict[cond_dict[KyNm.fail]] + except (KeyError) as exc: + self.log("getting failure condition from JSON failed", True, exc) + return 255, "" + + if KyNm.match in cond_dict.keys(): + cond = self.get_fail_cond_match(cond, cond_dict, scan_mode, op_mode) + + if NEG_STR + KyNm.match in cond_dict.keys(): + cond = self.get_fail_cond_match( + cond, cond_dict, scan_mode, op_mode, neg=True + ) + + if KyNm.resd in cond_dict.keys(): + cond = self.get_fail_cond_resp(cond, cond_dict) + + if NEG_STR + KyNm.resd in cond_dict.keys(): + cond = self.get_fail_cond_resp(cond, cond_dict, neg=True) + + if KyNm.supp in cond_dict.keys(): + cond = self.get_fail_cond_supp(cond, cond_dict, scan_mode, op_mode) + + if NEG_STR + KyNm.supp in cond_dict.keys(): + cond = self.get_fail_cond_supp( + cond, cond_dict, scan_mode, op_mode, neg=True + ) + + if KyNm.for_serv in cond_dict.keys(): + cond = self.get_fail_cond_for_serv(cond, cond_dict) + + if NEG_STR + KyNm.for_serv in cond_dict.keys(): + cond = self.get_fail_cond_for_serv(cond, cond_dict, neg=True) + + if KyNm.known in cond_dict.keys(): + cond = self.get_fail_cond_known(cond, cond_dict) + + if NEG_STR + KyNm.known in cond_dict.keys(): + cond = self.get_fail_cond_known(cond, cond_dict, neg=True) + + return failure, cond + + def get_neg_str(self, neg: bool = False) -> str: + """ + get negative prefix for SQL query and condition key. + """ + if neg: + return NEG_STR + else: + return "" + + def get_fail_cond_match( + self, + cond: str, + cond_dict: dict, + scan_mode: ScanMode, + op_mode: OpMode = OpMode.VEN_SPEC, + neg: bool = False, + ) -> str: + """ + get failure condition SQL query for the keyword 'match'. + """ + if op_mode == OpMode.VEN_SPEC and scan_mode == ScanMode.IDEN: + ref_cols = "" + neg_str = self.get_neg_str(neg) + try: + for ref_col in cond_dict[neg_str + KyNm.match]: + if ref_col in ( + ColNm.sess, + ColNm.serv, + ColNm.sbfn, + ColNm.iden, + ColNm.ecu_mode, + ColNm.boot, + ): + ref_cols = ref_cols + f""""{ref_col}"||"/"||""" + ref_cols = ref_cols[:-7] + add_cond = ( + f""" AND ({ref_cols}) {neg_str}IN """ + + f"""(SELECT({ref_cols} ) FROM "{VwNm.ref_vw}")""" + ) + except (KeyError) as exc: + self.log( + f"condition key reading failed at '{neg_str}{KyNm.match}'", + True, + exc, + ) + add_cond = "" + return cond + " " + textwrap.dedent(add_cond) + + def get_fail_cond_resp(self, cond: str, cond_dict: dict, neg: bool = False) -> str: + """ + get failure condition SQL query for the keyword 'responded'. + """ + try: + neg_str = self.get_neg_str(neg) + add_cond = f""" AND "{ColNm.resp}" {neg_str}IN (""" + for resp_name in cond_dict[neg_str + KyNm.resd]: + if str(resp_name).strip("-").isnumeric(): + add_cond += str(resp_name) + "," + else: + add_cond += str(self.iso_err_name_dict[resp_name]) + "," + add_cond = add_cond[:-1] + ")" + except (KeyError) as exc: + self.log( + f"condition key reading failed at '{neg_str}{KyNm.resd}'", True, exc + ) + add_cond = "" + return cond + " " + textwrap.dedent(add_cond) + + def get_fail_cond_supp( + self, + cond: str, + cond_dict: dict, + scan_mode: ScanMode, + op_mode: OpMode = OpMode.VEN_SPEC, + neg: bool = False, + ) -> str: + """ + get failure condition SQL query for the keyword 'supported'. 
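+
+        For example (illustrative), {"SUPPORTED": ["service"]} narrows the
+        condition to ' AND "<serv col>" IN (<supported service IDs>)'.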
+ """ + neg_str = self.get_neg_str(neg) + if op_mode == OpMode.ISO: + supp_serv_vec = self.supp_serv_iso_vec + if op_mode == OpMode.VEN_SPEC: + supp_serv_vec = self.supp_serv_ven_vec + try: + add_cond = "" + supp_ls = cond_dict[neg_str + KyNm.supp] + if ColNm.serv in supp_ls: + add_cond = f""" AND "{ColNm.serv}" {neg_str}IN (""" + for serv in supp_serv_vec: + add_cond += str(serv) + "," + cond += add_cond[:-1] + ")" + if ColNm.sess in supp_ls: + if scan_mode == ScanMode.IDEN: + add_cond = ( + f""" AND "{ColNm.sess}" {neg_str}IN """ + + f"""(SELECT * FROM "{VwNm.sess_alwd}")""" + ) + if scan_mode == ScanMode.SERV: + if op_mode == OpMode.ISO: + add_cond = ( + f""" AND ("{ColNm.serv}"||"/"||"{ColNm.sess}") """ + + f"""{neg_str}IN (SELECT("{ColNm.serv}"||"/"||"{ColNm.sess}") """ + + f"""FROM "{TblNm.ref_sess}")""" + ) + if op_mode == OpMode.VEN_SPEC: + add_cond = ( + f""" AND ("{ColNm.serv}"||"/"||"{ColNm.sess}"||"/"||"{ColNm.boot}") """ + + f"""{neg_str}IN (SELECT("{ColNm.serv}"||"/"||"{ColNm.sess}"||"/"||"{ColNm.boot}") """ + + f"""FROM "{TblNm.ref_sess}")""" + ) + cond += add_cond + if ColNm.sbfn in supp_ls: + if scan_mode == ScanMode.IDEN: + add_cond = ( + f""" AND "{ColNm.sbfn}" {neg_str}IN """ + + f"""(SELECT * FROM "{VwNm.sbfn_alwd}")""" + ) + if scan_mode == ScanMode.SERV: + add_cond = ( + f""" AND ("{ColNm.serv}"||"/"||"{ColNm.sbfn}") """ + + f"""{neg_str}IN (SELECT("{ColNm.serv}"||"/"||"{ColNm.sbfn}") """ + + f"""FROM "{TblNm.ref_sbfn}")""" + ) + cond += add_cond + if ColNm.resp in supp_ls: + if scan_mode == ScanMode.IDEN: + add_cond = ( + f""" AND "{ColNm.resp}" {neg_str}IN """ + + f"""(SELECT * FROM "{VwNm.resp_alwd}")""" + ) + if scan_mode == ScanMode.SERV: + add_cond = ( + f""" AND ("{ColNm.serv}"||"/"||"{ColNm.resp}") """ + + f"""{neg_str}IN (SELECT("{ColNm.serv}"||"/"||"{ColNm.resp}") """ + + f"""FROM "{TblNm.ref_resp}")""" + ) + cond += add_cond + except (KeyError) as exc: + self.log( + f"condition key reading failed at '{neg_str}{KyNm.supp}'", True, exc + ) + return cond + + def get_fail_cond_for_serv( + self, cond: str, cond_dict: dict, neg: bool = False + ) -> str: + """ + get failure condition SQL query for the keyword 'for service'. + """ + neg_str = self.get_neg_str(neg) + try: + add_cond = f""" AND "{ColNm.serv}" {neg_str}IN (""" + for serv_name in cond_dict[neg_str + KyNm.for_serv]: + if str(serv_name).strip("-").isnumeric(): + add_cond += str(serv_name) + "," + else: + add_cond += str(self.iso_serv_name_dict[serv_name]) + "," + add_cond = add_cond[:-1] + ")" + except (KeyError) as exc: + self.log( + f"condition key reading failed at '{neg_str}{KyNm.for_serv}'", True, exc + ) + return cond + return cond + " " + add_cond + + def get_fail_cond_known(self, cond: str, cond_dict: dict, neg: bool = False) -> str: + """ + get failure condition SQL query for the keyword 'known'. 
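+
+        For example (illustrative), {"NOT KNOWN": ["service"]} yields
+        ' AND "<serv col>" NOT IN (<ISO UDS service IDs>)'.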
+        """
+        neg_str = self.get_neg_str(neg)
+        try:
+            unknown_ls = cond_dict[neg_str + KyNm.known]
+            if ColNm.serv in unknown_ls:
+                add_cond = f""" AND "{ColNm.serv}" {neg_str}IN ("""
+                for serv in self.iso_serv_code_vec:
+                    add_cond += str(serv) + ","
+                add_cond = add_cond[:-1]
+                cond += add_cond + ")"
+            if ColNm.sess in unknown_ls:
+                add_cond = f""" AND "{ColNm.sess}" {neg_str}IN ("""
+                for sess in self.sess_code_vec:
+                    add_cond += str(sess) + ","
+                add_cond = add_cond[:-1]
+                cond += add_cond + ")"
+            if ColNm.resp in unknown_ls:
+                add_cond = f""" AND "{ColNm.resp}" {neg_str}IN ("""
+                for resp in self.iso_err_code_vec:
+                    add_cond += str(resp) + ","
+                cond += add_cond[:-1] + ")"
+        except (KeyError) as exc:
+            self.log(
+                f"condition key reading failed at '{neg_str}{KyNm.known}'", True, exc
+            )
+        return cond
diff --git a/src/gallia/analyzer/arg_help.py b/src/gallia/analyzer/arg_help.py
new file mode 100644
index 000000000..3c2ef29d2
--- /dev/null
+++ b/src/gallia/analyzer/arg_help.py
@@ -0,0 +1,94 @@
+"""
+gallia-analyze module for argument help texts
+"""
+
+if __name__ == "__main__":
+    exit()
+
+
+class ArgHelp:
+    """
+    class for argument help texts
+    """
+
+    main = """
+    gallia-analyze, version 0.1.0
+    Extract, analyze and visualize data obtained from gallia scan_service and scan_identifier.
+    """
+
+    usage = """
+    ex) execute all the analysis and reporting operations in a sequence for runs 1 to 5.
+    > gallia-analyze --source [database file path] -near --from 1 --to 5
+
+    ex) clear all the data and redo all the analysis operations in a sequence for all runs.
+    > gallia-analyze --source [database file path] -clean
+
+    Please refer to the help for details.
+    > gallia-analyze --help
+    """
+
+    # Commands
+    analyze = """
+    Categorize failures judging by parameters, using vendor-specific lookup data by default.
+    """
+    clear = """
+    Clear all analyzed data in the database.
+    """
+    extract = """
+    Extract JSON data, etc. from the database and store it into relational tables.
+    """
+    aio_iden = """
+    Consolidate all scan_identifier runs into one EXCEL file sorted by ECU mode for a certain Service ID.
+    """
+    graph = """
+    Output response statistics graphs in PNG format.
+    """
+    report = """
+    Output reports as EXCEL files.
+    """
+    aio_serv = """
+    Consolidate all scan_service runs into one EXCEL file sorted by ECU mode.
+    """
+    time = """
+    Conduct time analysis based on reaction time.
+    """
+
+    # Options
+    all_serv = """
+    Iterate 'all-ECU-modes' reporting for all services by identifier defined in the UDS ISO Standard.
+    """
+    debug = """
+    Use debug mode. Save SQL queries for analysis to SQL files.
+    """
+    iso = """
+    Use the UDS ISO Standard while analyzing data.
+    """
+    log = """
+    Log messages to a file.
+    """
+    possible = """
+    Show all possible service IDs or identifiers on the summary sheet.
+    """
+    cat = """
+    Use the Categorizer (an Analyzer based on the pandas framework) instead of the SQL-based Analyzer.
+    """
+
+    # Parameters
+    sid = """
+    Service ID to report for all ECU modes in one EXCEL file
+    """
+    first = """
+    The first run to process
+    """
+    last = """
+    The last run to process
+    """
+    output = """
+    Path for EXCEL reports
+    """
+    source = """
+    Path of the source database
+    """
+    prec = """
+    Time precision for time analysis. Defined as the number of digits in Unix time.
+    ex) 19 = nanosecond, 16 = microsecond, 13 = millisecond
+    """
diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py
new file mode 100644
index 000000000..549cdd43a
--- /dev/null
+++ b/src/gallia/analyzer/categorizer.py
@@ -0,0 +1,388 @@
+"""
+gallia-analyze Categorizer module
+"""
+
+from sqlite3 import OperationalError
+import numpy as np
+import pandas as pd
+from gallia.analyzer.analyzer import Analyzer
+from gallia.analyzer.config import TblStruct
+from gallia.analyzer.failure import Failure
+from gallia.analyzer.mode_config import LogMode, OpMode
+from gallia.analyzer.name_config import ColNm, TblNm
+from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException
+from gallia.uds.core.constants import UDSIsoServices, UDSErrorCodes
+
+if __name__ == "__main__":
+    exit()
+
+
+class Categorizer(Analyzer):
+    """
+    Categorizer class for analysis operations based on pandas.
+    Inherits from Analyzer.
+    """
+
+    def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT):
+        Analyzer.__init__(self, path, log_mode)
+        self.msg_head = "[Categorizer] "
+
+    def analyze_serv(self, run: int, op_mode: OpMode) -> bool:
+        """
+        analyze scan_service result data for a certain run at a given analysis mode.
+        """
+        try:
+            raw_df = self.read_run_db(TblNm.serv, run)
+            self.check_df(raw_df, TblStruct.serv)
+            raw_df = self.categorize_serv(raw_df, op_mode)
+            if not self.delete_run_db(TblNm.serv, run):
+                return False
+            if not self.write_db(raw_df, TblNm.serv):
+                return False
+        except (EmptyTableException, ColumnMismatchException, OperationalError) as exc:
+            self.log("analyzing scan_service failed", True, exc)
+            return False
+        return True
+
+    def analyze_iden(self, run: int, op_mode: OpMode) -> bool:
+        """
+        analyze scan_identifier result data for a certain run at a given analysis mode.
+        """
+        try:
+            if not self.load_lu_iden(self.get_sid(run), self.get_ecu_mode(run)):
+                return False
+            raw_df = self.read_run_db(TblNm.iden, run)
+            self.check_df(raw_df, TblStruct.iden)
+            raw_df.set_index(ColNm.id, inplace=True)
+            raw_df = self.categorize_iden(raw_df, self.get_ecu_mode(run), op_mode)
+            if not self.delete_run_db(TblNm.iden, run):
+                return False
+            if not self.write_db(raw_df, TblNm.iden):
+                return False
+        except (EmptyTableException, ColumnMismatchException, OperationalError) as exc:
+            self.log("analyzing scan_identifier failed", True, exc)
+            return False
+        return True
+
+    def categorize_serv(
+        self, raw_df: pd.DataFrame, op_mode: OpMode = OpMode.VEN_SPEC
+    ) -> pd.DataFrame:
+        """
+        categorize failures for scan_service.
+        """
+        try:
+            raw_df[ColNm.combi] = list(
+                zip(
+                    raw_df[ColNm.serv],
+                    raw_df[ColNm.sess],
+                    raw_df[ColNm.resp],
+                    raw_df[ColNm.ecu_mode],
+                )
+            )
+            raw_df.loc[:, ColNm.fail] = raw_df[ColNm.combi].apply(
+                lambda x: self.get_fail_serv(op_mode, x[0], x[1], x[2], x[3])
+            )
+            raw_df = raw_df.drop([ColNm.combi], axis=1)
+        except (KeyError) as exc:
+            self.log("categorizing failures for scan_service failed", True, exc)
+            return pd.DataFrame()
+        return raw_df
+
+    def categorize_iden(
+        self,
+        raw_df: pd.DataFrame,
+        ecu_mode: int,
+        op_mode: OpMode = OpMode.VEN_SPEC,
+    ) -> pd.DataFrame:
+        """
+        categorize failures for scan_identifier.
+ """ + try: + serv_vec = pd.unique(raw_df[ColNm.serv]) + if not serv_vec.size == 1: + self.log("more than one service in a run", True) + return pd.DataFrame() + else: + serv = serv_vec[0] + if not self.load_lu_iden(serv, ecu_mode): + return pd.DataFrame() + raw_df[ColNm.combi] = list( + zip( + raw_df[ColNm.sess], + raw_df[ColNm.boot], + raw_df[ColNm.sbfn], + raw_df[ColNm.iden], + raw_df[ColNm.resp], + raw_df[ColNm.ecu_mode], + ) + ) + raw_df.loc[:, ColNm.fail] = raw_df[ColNm.combi].apply( + lambda x: self.get_fail_iden( + op_mode, serv, x[0], x[1], x[2], x[3], x[4], x[5] + ) + ) + raw_df = raw_df.drop([ColNm.combi], axis=1) + except (KeyError) as exc: + self.log("categorizing failures for scan_identifier failed", True, exc) + return pd.DataFrame() + return raw_df + + def check_sess_alwd(self, serv: int, sess: int, op_mode: OpMode, ecu_mode) -> bool: + """ + check if a certain diagnostic session is available or supported + for a certain service at given analysis mode. + """ + if op_mode == OpMode.VEN_SPEC: + ref_df = self.ref_ven_df[ecu_mode] + if op_mode == OpMode.ISO: + ref_df = self.ref_iso_df + if not serv in ref_df.index: + return False + return sess in ref_df.loc[serv, ColNm.sess] + + def check_resp_alwd(self, serv: int, resp: int) -> bool: + """ + check if a certain response is available or supported for a certain service. + """ + if not serv in list(self.ref_iso_df.index): + return False + return ( + resp + in self.ref_iso_df.loc[serv, ColNm.resp] + + self.iso_supp_err_for_all_vec.tolist() + ) + + def check_sbfn_alwd(self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode) -> bool: + """ + check if a certain sub-function is available or supported + for a certain service at given analysis mode. + """ + if op_mode == OpMode.VEN_SPEC: + ref_df = self.ref_ven_df[ecu_mode] + if op_mode == OpMode.ISO: + ref_df = self.ref_iso_df + if not serv in ref_df.index: + return False + return sbfn in ref_df.loc[serv, ColNm.sbfn] + + def get_fail_serv( + self, + op_mode: OpMode, + serv: int, + sess: int, + resp: int, + ecu_mode: int, + ) -> Failure: + """ + get failure for given parameters, service, diagnostic session and response + at given analysis mode. 
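+
+        Example (illustrative): an unknown service that is answered with
+        serviceNotSupported is classified as Failure.OK_SERV_A.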
+        """
+        if op_mode == OpMode.VEN_SPEC:
+            supp_serv_vec = self.supp_serv_ven_vec
+        if op_mode == OpMode.ISO:
+            supp_serv_vec = self.supp_serv_iso_vec
+
+        cond_serv_known = serv in self.iso_serv_code_vec
+        cond_serv_supp = serv in supp_serv_vec
+        cond_resp_means_not_supp = resp in self.iso_err_means_not_supp_vec
+        cond_no_resp = resp == -1
+        cond_sess_alwd = self.check_sess_alwd(serv, sess, op_mode, ecu_mode)
+        cond_resp_alwd = self.check_resp_alwd(serv, resp)
+        cond_resp_serv_not_supp = resp == UDSErrorCodes.serviceNotSupported
+        cond_resp_serv_not_supp_in_cur_sess = (
+            resp == UDSErrorCodes.serviceNotSupportedInActiveSession
+        )
+        cond_resp_sbfn_not_supp = resp == UDSErrorCodes.subFunctionNotSupported
+
+        # invalid or unknown response
+        if resp == 0x80:
+            return Failure.UNDOC_SERV
+        if resp == 0xA0:
+            return Failure.UNDOC_SERV
+
+        if not cond_serv_known:
+            # normal responses to unknown services
+            if cond_resp_serv_not_supp:
+                return Failure.OK_SERV_A
+
+            # timeout / no response to unknown services
+            if cond_no_resp:
+                return Failure.OK_SERV_B
+
+        if not cond_serv_supp:
+            # normal responses to unsupported services
+            if cond_resp_means_not_supp:
+                return Failure.OK_SERV_C
+
+            # timeout / no response to unsupported services
+            if cond_no_resp:
+                return Failure.OK_SERV_D
+
+            # Undocumented Type A: services not defined in the ISO standard
+            # or the vendor-specific reference responded otherwise
+            if not cond_resp_means_not_supp and not cond_no_resp:
+                return Failure.UNDOC_SERV_A
+
+        if cond_serv_supp:
+            # normal response to supported services when they are not supported in the active session
+            if not cond_sess_alwd:
+                if cond_resp_means_not_supp:
+                    return Failure.OK_SERV_E
+
+                # Undocumented Type B: supported services in an unavailable session responded
+                # with something other than the "not supported" family
+                if not cond_resp_means_not_supp:
+                    return Failure.UNDOC_SERV_B
+
+            if cond_sess_alwd:
+                # allowed NRC for an available service in the active session
+                if cond_resp_alwd and not cond_resp_means_not_supp:
+                    return Failure.OK_SERV_F
+
+                # supported services (even in an available session) give a response undocumented in ISO
+                if not cond_resp_means_not_supp:
+                    return Failure.OK_SERV_G
+
+                # Missing Type A: defined in the ISO standard or vendor-specific reference as available
+                # in a session, but gives the response "not supported in active session"
+                if cond_resp_serv_not_supp_in_cur_sess:
+                    return Failure.MISS_SERV_A
+
+                # Missing Type B: defined in the ISO standard or vendor-specific reference as available
+                # in a session, but gives the response "service not supported"
+                if cond_resp_serv_not_supp:
+                    return Failure.MISS_SERV_B
+
+                # supported services in an available session respond with "subFunctionNotSupported"
+                if cond_resp_sbfn_not_supp:
+                    return Failure.OK_SERV_H
+
+        return Failure.UNKNOWN
+
+    def get_fail_iden(
+        self,
+        op_mode: OpMode,
+        serv: int,
+        sess: int,
+        boot: int,
+        sbfn: int,
+        iden: int,
+        resp: int,
+        ecu_mode: int,
+    ) -> Failure:
+        """
+        get the failure for the given parameters (service, diagnostic session, sub-function,
+        identifier and response) at a given analysis mode.
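+
+        Example (illustrative): a scanned tuple that is absent from the
+        vendor-specific lookup and is answered with requestOutOfRange is
+        classified as Failure.OK_IDEN_G.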
+ """ + if op_mode == OpMode.VEN_SPEC: + supp_serv_vec = self.supp_serv_ven_vec + if op_mode == OpMode.ISO: + supp_serv_vec = self.supp_serv_iso_vec + + cond_serv_supp = serv in supp_serv_vec + cond_resp_alwd = self.check_resp_alwd(serv, resp) + cond_sbfn_alwd = self.check_sbfn_alwd(serv, sbfn, op_mode, ecu_mode) + cond_resp_serv_not_supp = resp == UDSErrorCodes.serviceNotSupported + cond_resp_sbfn_not_supp = resp == UDSErrorCodes.subFunctionNotSupported + + if (not cond_serv_supp) and (cond_resp_serv_not_supp): + return Failure.OK_IDEN_A + if (not cond_sbfn_alwd) and (cond_resp_sbfn_not_supp): + return Failure.OK_IDEN_B + + try: + combi = (sess, boot, sbfn, iden, ecu_mode) + combis_ls = list(self.lu_iden_df[ColNm.combi]) + cond_combi = combi in combis_ls + cond_combi_aem = False + for cur_mode in np.arange(self.num_modes): + combi = (sess, boot, sbfn, iden, cur_mode) + combis_ls = list(self.lu_iden_df[ColNm.combi]) + if combi in combis_ls: + cond_combi_aem = True + break + + except (KeyError, AttributeError) as exc: + self.log("getting failure for identifier failed", True, exc) + return Failure.UNKNOWN + + if cond_combi: + if resp == UDSErrorCodes.serviceNotSupportedInActiveSession: + return Failure.MISS_IDEN_A + + if resp == UDSErrorCodes.serviceNotSupported: + return Failure.MISS_IDEN_B + + if resp == UDSErrorCodes.requestOutOfRange: + return Failure.MISS_IDEN_C + + if serv == UDSIsoServices.WriteDataByIdentifier: + if resp == UDSErrorCodes.securityAccessDenied: + return Failure.MISS_IDEN_D + + if cond_resp_alwd: + return Failure.OK_IDEN_C + + if resp == 0: + return Failure.OK_IDEN_D + + if cond_combi_aem: + if resp == UDSErrorCodes.conditionsNotCorrect: + if serv == UDSIsoServices.ReadDataByIdentifier: + return Failure.OK_IDEN_E + + if serv == UDSIsoServices.RoutineControl: + return Failure.OK_IDEN_F + + if not cond_combi: + # general default response + if resp == UDSErrorCodes.requestOutOfRange: + return Failure.OK_IDEN_G + + if serv == UDSIsoServices.ReadDataByIdentifier: + if resp == UDSErrorCodes.incorrectMessageLengthOrInvalidFormat: + return Failure.DFT_RES_A + + if serv == UDSIsoServices.SecurityAccess: + if resp == UDSErrorCodes.subFunctionNotSupported: + return Failure.DFT_RES_B + + if resp == UDSErrorCodes.serviceNotSupportedInActiveSession: + return Failure.DFT_RES_B + + if resp == UDSErrorCodes.subFunctionNotSupportedInActiveSession: + return Failure.DFT_RES_B + + if serv == UDSIsoServices.RoutineControl: + if resp == UDSErrorCodes.subFunctionNotSupported: + return Failure.DFT_RES_C + + if serv == UDSIsoServices.WriteDataByIdentifier: + if resp == UDSErrorCodes.securityAccessDenied: + return Failure.DFT_RES_D + + if resp == UDSErrorCodes.incorrectMessageLengthOrInvalidFormat: + return Failure.DFT_RES_D + + if resp == UDSErrorCodes.serviceNotSupportedInActiveSession: + return Failure.DFT_RES_D + + if resp == UDSErrorCodes.conditionsNotCorrect: + return Failure.DFT_RES_D + + if resp == UDSErrorCodes.conditionsNotCorrect: + return Failure.UNDOC_IDEN_A + + if resp == UDSErrorCodes.subFunctionNotSupportedInActiveSession: + return Failure.UNDOC_IDEN_B + + if cond_resp_alwd: + return Failure.UNDOC_IDEN_C + + if not cond_resp_alwd: + return Failure.UNDOC_IDEN_D + + if resp == 0: + return Failure.UNDOC_IDEN_E + + return Failure.UNKNOWN diff --git a/src/gallia/analyzer/config.py b/src/gallia/analyzer/config.py new file mode 100644 index 000000000..c0fb5cf8f --- /dev/null +++ b/src/gallia/analyzer/config.py @@ -0,0 +1,134 @@ +""" +gallia-analyze Config module +""" +import 
importlib.resources +from enum import IntEnum +from gallia import analyzer +from gallia.analyzer.name_config import ColNm +from gallia.analyzer.constants import SqlDataType + +if __name__ == "__main__": + exit() + + +def load_resource_file(path: str) -> str: + """ + load resource file by name from package_data + + :param path: path to object within the package_data + :return: absolut path to resource + """ + pkg = importlib.resources.files(analyzer) + return str(pkg / path) + + +FAIL_CLS_CAP = 16 +NUM_ECU_MODES = 3 + +# default time precision for time analysis +# 19: nanosecond +# 16: microsecond +# 13: millisecond +DFT_T_PREC = 19 + + +class MiscError(IntEnum): + """ + enum class for undefined errors + """ + + UNKNOWN_ERROR = 0x80 + INVALID_RESPONSE = 0xA0 + NO_RESPONSE = -1 + POSITIVE_RESPONSE = 0 + + +class SrcPath: + """ + class for source paths + """ + + err_src = load_resource_file("json/responses.json") + uds_iso_src = load_resource_file("json/uds_iso_standard.json") + cond_src = load_resource_file("json/conditions.json") + + +class TblStruct: + """ + class for relational table structures + """ + + serv = { + ColNm.id: SqlDataType.integer, + ColNm.run: SqlDataType.integer, + ColNm.t_rqst: SqlDataType.integer, + ColNm.t_resp: SqlDataType.integer, + ColNm.ecu_mode: SqlDataType.integer, + ColNm.serv: SqlDataType.integer, + ColNm.sess: SqlDataType.integer, + ColNm.boot: SqlDataType.integer, + ColNm.resp: SqlDataType.integer, + ColNm.fail: SqlDataType.integer, + } + iden = { + ColNm.id: SqlDataType.integer, + ColNm.run: SqlDataType.integer, + ColNm.t_rqst: SqlDataType.integer, + ColNm.t_resp: SqlDataType.integer, + ColNm.ecu_mode: SqlDataType.integer, + ColNm.serv: SqlDataType.integer, + ColNm.sess: SqlDataType.integer, + ColNm.boot: SqlDataType.integer, + ColNm.sbfn: SqlDataType.integer, + ColNm.iden: SqlDataType.integer, + ColNm.resp: SqlDataType.integer, + ColNm.fail: SqlDataType.integer, + } + ven_lu = { + ColNm.serv: SqlDataType.integer, + ColNm.sess: SqlDataType.integer, + ColNm.boot: SqlDataType.integer, + ColNm.sbfn: SqlDataType.integer, + ColNm.iden: SqlDataType.integer, + ColNm.ecu_mode: SqlDataType.integer, + } + ven_sess = { + ColNm.sess_name: SqlDataType.text, + ColNm.sess: SqlDataType.integer, + } + ref_resp = { + ColNm.serv: SqlDataType.integer, + ColNm.resp: SqlDataType.integer, + } + ref_sbfn = { + ColNm.serv: SqlDataType.integer, + ColNm.sbfn: SqlDataType.integer, + } + ref_sess = { + ColNm.serv: SqlDataType.integer, + ColNm.sess: SqlDataType.integer, + ColNm.boot: SqlDataType.integer, + } + + +class XlDesign: + """ + class for EXCEL report design + """ + + font_index = "Calibri" + font_value = "Courier New" + + dim_wide = 45 + dim_mid_wide = 32 + dim_middle = 25 + dim_narrow = 10 + + +class PltDesign: + """ + class for matplotlib graph design + """ + + hist_style = "dark_background" + plot_style = "dark_background" diff --git a/src/gallia/analyzer/constants.py b/src/gallia/analyzer/constants.py new file mode 100644 index 000000000..c2f9abdde --- /dev/null +++ b/src/gallia/analyzer/constants.py @@ -0,0 +1,30 @@ +""" +gallia-analyze Const module +""" +from enum import IntEnum + +if __name__ == "__main__": + exit() + + +class UDSIsoSessions(IntEnum): + """ + enum class for diagnostic sessions defined in UDS ISO standard + """ + + DEFAULT_SESSION = 0x01 + PROGRAMMING_SESSION = 0x02 + EXTENDED_DIAGNOSTIC_SESSION = 0x03 + SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04 + + +class SqlDataType: + """ + class for SQL data types + """ + + integer = "INTEGER" + text = "TEXT" + null = 
"NULL" + real = "REAL" + blob = "BLOB" diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py new file mode 100644 index 000000000..6688eadb5 --- /dev/null +++ b/src/gallia/analyzer/db_handler.py @@ -0,0 +1,194 @@ +""" +gallia-analyze Database Handler module +""" +import os +import sys +import sqlite3 +from sqlite3 import OperationalError +import pandas as pd +from pandas.io.sql import DatabaseError +from gallia.analyzer.mode_config import LogMode +from gallia.analyzer.name_config import ColNm + +if __name__ == "__main__": + exit() + + +class DatabaseHandler: + """ + Basic class for all classes in gallia-analyze. + Used for database connection, reading and writing data and log. + """ + + def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT) -> None: + self.set_db_path(path) + self.log_mode = log_mode + self.msg_head = "[DatabaseHandler] " + self.err_head = " " + self.log_file = "logfile.txt" + self.con: sqlite3.Connection + self.cur: sqlite3.Cursor + self.connect_db() + + def log(self, msg: str = "", err_flag: bool = False, exc: Exception = None) -> None: + """ + print program messages in console or log program messages in log file. + """ + if err_flag: + if exc is None: + total_msg = self.msg_head + self.err_head + msg + "\n" + else: + total_msg = ( + self.msg_head + + self.err_head + + msg + + f": {type(exc).__name__} {str(exc)}" + + "\n" + ) + else: + total_msg = self.msg_head + msg + "\n" + if self.log_mode == LogMode.LOG_FILE: + try: + with open(self.log_file, "a", encoding="utf8") as logfile: + logfile.write(total_msg) + except FileNotFoundError: + sys.stdout.write(total_msg) + if self.log_mode == LogMode.STD_OUT: + sys.stdout.write(total_msg) + + def set_db_path(self, path: str = "") -> bool: + """ + set path for database to read. + """ + if path != "": + self.db_path = os.path.expanduser(path) + else: + self.db_path = "" + return False + return True + + def connect_db(self) -> bool: + """ + establish connection to database. + """ + try: + self.con = sqlite3.connect(self.db_path) + self.cur = self.con.cursor() + except (OperationalError) as exc: + self.log("DB connection failed", True, exc) + return False + return True + + def create_table( + self, table_name: str, columns_dict: dict, not_exists: bool = False + ) -> bool: + """ + create a relational table in the database. + """ + sql_columns = "" + for key in columns_dict.keys(): + sql_columns += '"' + key + '" ' + sql_columns += columns_dict[key] + sql_columns += "," + + sql_columns = sql_columns[:-1] + if not_exists: + create_sql = f"CREATE TABLE IF NOT EXISTS {table_name}({sql_columns});" + else: + create_sql = f"DROP TABLE IF EXISTS {table_name};CREATE TABLE {table_name}({sql_columns});" + try: + self.cur.executescript(create_sql) + self.con.commit() + except (OperationalError, AttributeError) as exc: + self.log("DB creating table failed", True, exc) + return False + return True + + def clear_table(self, table_name: str) -> bool: + """ + clear(delete) all data in a relational table in the database. + """ + try: + self.cur.execute(f"DELETE FROM {table_name}") + self.con.commit() + except (OperationalError, AttributeError) as exc: + self.log("DB clearing table failed", True, exc) + return False + return True + + def delete_table(self, table_name: str) -> bool: + """ + delete(drop) a relational table in the database. 
+ """ + try: + self.cur.execute(f"DROP TABLE IF EXISTS {table_name}") + self.con.commit() + except (OperationalError, AttributeError) as exc: + self.log("DB deleting table failed", True, exc) + return False + return True + + def get_df_by_query(self, sql: str, error_on: bool = True) -> pd.DataFrame: + """ + query in a database with SQL query string. + """ + try: + raw_df: pd.DataFrame = pd.read_sql_query(sql, self.con) + except (DatabaseError, AttributeError) as exc: + if error_on: + self.log("DB query failed", True, exc) + return pd.DataFrame() + if raw_df.shape[0] == 0: + if error_on: + self.log("no entry in database.", True) + return pd.DataFrame() + return raw_df + + def read_db(self, table_name: str) -> pd.DataFrame: + """ + read out all the data in a relational table in the database. + returns a pandas data frame. + """ + return self.get_df_by_query(f"""SELECT * FROM "{table_name}";""") + + def read_run_db(self, table_name: str, run: int) -> pd.DataFrame: + """ + read out the data of a run in a relational table in the database. + returns a pandas data frame. + """ + return self.get_df_by_query( + f"""SELECT * FROM "{table_name}" WHERE "{ColNm.run}" = {str(run)};""" + ) + + def read_sid_db(self, table_name: str, serv: int) -> pd.DataFrame: + """ + read out the data of a service ID in a relational table in the database. + returns a pandas data frame. + """ + return self.get_df_by_query( + f"SELECT * FROM {table_name} WHERE {ColNm.serv} = {str(serv)}" + ) + + def delete_run_db(self, table_name: str, run: int) -> bool: + """ + delete the data of a run in a relational table in the database. + """ + del_sql = f"""DELETE FROM "{table_name}" WHERE "{ColNm.run}" = {str(run)};""" + try: + self.cur.executescript(del_sql) + self.con.commit() + except (OperationalError, AttributeError) as exc: + self.log("deleting a run from DB failed", True, exc) + return False + return True + + def write_db(self, raw_df: pd.DataFrame, table_name: str) -> bool: + """ + write data into a relational table in the database + """ + try: + raw_df.to_sql(table_name, self.con, if_exists="append", index=False) + except (OperationalError, AttributeError) as exc: + self.log("writing data to DB failed", True, exc) + return False + return True diff --git a/src/gallia/analyzer/exceptions.py b/src/gallia/analyzer/exceptions.py new file mode 100644 index 000000000..2a66075b6 --- /dev/null +++ b/src/gallia/analyzer/exceptions.py @@ -0,0 +1,24 @@ +""" +gallia-analyze Exceptions module +""" + +if __name__ == "__main__": + exit() + + +class EmptyTableException(Exception): + """ + exception class for empty table error + """ + + def __init__(self): + super().__init__("Empty Table.") + + +class ColumnMismatchException(Exception): + """ + exception class for column mismatch + """ + + def __init__(self): + super().__init__("Columns Mismatch.") diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py new file mode 100644 index 000000000..70d2263cd --- /dev/null +++ b/src/gallia/analyzer/extractor.py @@ -0,0 +1,192 @@ +""" +gallia-analyze Extractor module +""" + +from sqlite3 import OperationalError +import numpy as np +from gallia.analyzer.operator import Operator +from gallia.analyzer.config import TblStruct +from gallia.analyzer.mode_config import LogMode, ScanMode +from gallia.analyzer.name_config import TblNm, ColNm, VwNm + +if __name__ == "__main__": + exit() + + +class Extractor(Operator): + + """ + Class for extracting attained scan result data in database, + archiving it into relational tables. 
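+
+    A minimal usage sketch (illustrative only; the database path is an
+    assumed example):
+
+        extractor = Extractor(path="./scan_db.sqlite")
+        extractor.extract(np.array([1, 2]))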
+ """ + + def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): + Operator.__init__(self, path, log_mode) + self.msg_head = "[Extractor] " + + def extract(self, runs_vec: np.ndarray) -> bool: + """ + extract scan result data from JSON form in the database + and save it into relational tables for given input runs. + """ + if not self.load_meta(force=True): + return False + for run in runs_vec: + self.extract_each_run(run) + return True + + def extract_each_run(self, run: int) -> bool: + """ + extract scan result data from JSON form in the database + and save it into relational tables for a certain input run. + """ + self.log(f"extracting run #{str(run)} from {self.db_path} ...") + self.check_boot(run) + scan_mode = self.get_scan_mode(run) + if scan_mode == ScanMode.SERV: + return self.extract_serv(run) + if scan_mode == ScanMode.IDEN: + return self.extract_iden(run) + return False + + def extract_serv(self, run: int) -> bool: + """ + extract scan_service result data from JSON form in the database + and save it into relational tables for a certain input run. + """ + if self.get_scan_mode(run) != ScanMode.SERV: + return False + if not self.create_table(TblNm.serv, TblStruct.serv, True): + return False + if not self.delete_run_db(TblNm.serv, run): + return False + extract_sql = f""" + DROP VIEW IF EXISTS "{VwNm.resp_vw}"; + CREATE VIEW "{VwNm.resp_vw}" + AS SELECT "{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", + json_extract("request_data", '$.service_id') AS "{ColNm.serv}", + json_extract("state", '$.session') AS "{ColNm.sess}", + json_extract("state", '$.boot') AS "{ColNm.boot}", + CASE WHEN json_extract("response_data", '$.service_id') != 127 THEN 0 + WHEN json_extract("response_data", '$.response_code') IS NULL THEN -1 + ELSE json_extract("response_data", '$.response_code') + END "{ColNm.resp}" + FROM "{TblNm.scan_result}" WHERE "{ColNm.run}" = {str(run)} + AND "log_mode" = "explicit" OR "log_mode" = "emphasized"; + INSERT INTO "{TblNm.serv}" ("{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", + "{ColNm.ecu_mode}", "{ColNm.serv}", + "{ColNm.sess}", "{ColNm.boot}", "{ColNm.resp}") + SELECT "{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", + CASE WHEN "{ColNm.ecu_mode}" IS NULL THEN 0 + ELSE "{ColNm.ecu_mode}" + END "{ColNm.ecu_mode}", + "{ColNm.serv}", "{ColNm.sess}", + CASE WHEN "{ColNm.boot}" IS NULL AND "{ColNm.sess}" = 2 THEN 1 + WHEN "{ColNm.boot}" IS NULL THEN 0 + ELSE "{ColNm.boot}" + END "{ColNm.boot}", + "{ColNm.resp}" + FROM "{VwNm.resp_vw}" + LEFT JOIN "{TblNm.meta}" + ON "{TblNm.meta}"."{ColNm.run_id}" = "{VwNm.resp_vw}"."{ColNm.run}"; + UPDATE "{TblNm.serv}" SET "{ColNm.fail}" = 255; + DROP VIEW IF EXISTS "{VwNm.resp_vw}"; + """ + try: + self.cur.executescript(extract_sql) + self.con.commit() + except (OperationalError) as exc: + self.log("extracting scan_service failed", True, exc) + return False + return True + + def extract_iden(self, run: int) -> bool: + """ + extract scan_identifier result data from JSON form in the database + and save it into relational tables for a certain input run. 
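+
+        The request_data JSON is expected to provide fields such as
+        '$.service_id', '$.sub_function' and '$.data_identifier'
+        (see the json_extract calls below).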
+ """ + if self.get_scan_mode(run) != ScanMode.IDEN: + return False + if not self.create_table(TblNm.iden, TblStruct.iden, True): + return False + if not self.delete_run_db(TblNm.iden, run): + return False + extract_sql = f""" + DROP VIEW IF EXISTS "{VwNm.resp_vw}"; + CREATE VIEW "{VwNm.resp_vw}" + AS SELECT "{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", + json_extract("request_data", '$.service_id') AS "{ColNm.serv}", + json_extract("state", '$.session') AS "{ColNm.sess}", + json_extract("state", '$.boot') AS "{ColNm.boot}", + CASE WHEN json_extract("request_data", '$.service_id') = 49 + THEN json_extract("request_data", '$.sub_function') + ELSE -1 + END "{ColNm.sbfn}", + CASE WHEN json_extract("request_data", '$.service_id') = 49 + THEN json_extract("request_data", '$.routine_identifier') + WHEN json_extract("request_data", '$.service_id') = 39 + AND json_extract("request_data", '$.sub_function') IS NULL THEN -1 + WHEN json_extract("request_data", '$.service_id') = 39 + THEN json_extract("request_data", '$.sub_function') + WHEN json_extract("request_data", '$.data_identifier') IS NULL + THEN json_extract("request_data", '$.data_identifiers[0]') + ELSE json_extract("request_data", '$.data_identifier') + END "{ColNm.iden}", + json_extract("request_data", '$.identifier') AS "{ColNm.iden}", + CASE WHEN json_extract("response_data", '$.service_id') != 127 THEN 0 + WHEN json_extract("response_data", '$.response_code') IS NULL THEN -1 + ELSE json_extract("response_data", '$.response_code') + END "{ColNm.resp}" + FROM "{TblNm.scan_result}" + WHERE "{ColNm.run}" = {str(run)} + AND "log_mode" = "explicit" OR "log_mode" = "emphasized"; + INSERT INTO "{TblNm.iden}" ("{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", "{ColNm.ecu_mode}", + "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}", + "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.resp}") + SELECT "{ColNm.id}", "{ColNm.run}", + "{ColNm.t_rqst}", "{ColNm.t_resp}", + CASE WHEN "{ColNm.ecu_mode}" IS NULL THEN 0 + ELSE "{ColNm.ecu_mode}" + END "{ColNm.ecu_mode}", + "{ColNm.serv}", "{ColNm.sess}", + CASE WHEN "{ColNm.boot}" IS NULL AND "{ColNm.sess}" = 2 THEN 1 + WHEN "{ColNm.boot}" IS NULL THEN 0 + ELSE "{ColNm.boot}" + END "{ColNm.boot}", + "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.resp}" + FROM "{VwNm.resp_vw}" + LEFT JOIN "{TblNm.meta}" + ON "{TblNm.meta}"."{ColNm.run_id}" = "{VwNm.resp_vw}"."{ColNm.run}"; + UPDATE "{TblNm.iden}" SET "{ColNm.fail}" = 255; + DROP VIEW IF EXISTS "{VwNm.resp_vw}"; + """ + try: + self.cur.executescript(extract_sql) + self.con.commit() + except (OperationalError) as exc: + self.log("extracting scan_identifier failed", True, exc) + return False + return True + + def check_boot(self, run: int) -> bool: + try: + check_sql = f""" + SELECT json_extract("state", '$.boot') as "{ColNm.boot}" + FROM "{TblNm.scan_result}" WHERE "{ColNm.run}" = {str(run)}; + """ + boot_df = self.get_df_by_query(check_sql) + if boot_df.shape[0] == 0: + return False + boot_types_vec = np.array([0, 1]) # vendor-specific + boot_ok = boot_df[ColNm.boot].apply(lambda x: x in boot_types_vec).all() + if not boot_ok: + self.log("boot information not complete.", True) + except (KeyError, AttributeError, OperationalError) as exc: + self.log("checking boot information failed", True, exc) + return False + return boot_ok diff --git a/src/gallia/analyzer/failure.py b/src/gallia/analyzer/failure.py new file mode 100644 index 000000000..cccebcb20 --- /dev/null +++ b/src/gallia/analyzer/failure.py @@ -0,0 +1,135 @@ +""" +gallia-analyze 
Failure module +""" +from enum import IntEnum + +if __name__ == "__main__": + exit() + + +class Failure(IntEnum): + """ + enum class for failures + """ + + UNKNOWN = 0xFF + UNDEFINED = 0xF0 + + OK_SERV = 0x00 + OK_SERV_A = 0x01 + OK_SERV_B = 0x02 + OK_SERV_C = 0x03 + OK_SERV_D = 0x04 + OK_SERV_E = 0x05 + OK_SERV_F = 0x06 + OK_SERV_G = 0x07 + OK_SERV_H = 0x08 + OK_SERV_I = 0x09 + OK_SERV_J = 0x0A + OK_SERV_K = 0x0B + OK_SERV_L = 0x0C + OK_SERV_M = 0x0D + OK_SERV_N = 0x0E + OK_SERV_O = 0x0F + + OK_IDEN = 0x10 + OK_IDEN_A = 0x11 + OK_IDEN_B = 0x12 + OK_IDEN_C = 0x13 + OK_IDEN_D = 0x14 + OK_IDEN_E = 0x15 + OK_IDEN_F = 0x16 + OK_IDEN_G = 0x17 + OK_IDEN_H = 0x18 + OK_IDEN_I = 0x19 + OK_IDEN_J = 0x1A + OK_IDEN_K = 0x1B + OK_IDEN_L = 0x1C + OK_IDEN_M = 0x1D + OK_IDEN_N = 0x1E + OK_IDEN_O = 0x1F + + UNDOC_SERV = 0x20 + UNDOC_SERV_A = 0x21 + UNDOC_SERV_B = 0x22 + UNDOC_SERV_C = 0x23 + UNDOC_SERV_D = 0x24 + UNDOC_SERV_E = 0x25 + UNDOC_SERV_F = 0x26 + UNDOC_SERV_G = 0x27 + UNDOC_SERV_H = 0x28 + UNDOC_SERV_I = 0x29 + UNDOC_SERV_J = 0x2A + UNDOC_SERV_K = 0x2B + UNDOC_SERV_L = 0x2C + UNDOC_SERV_M = 0x2D + UNDOC_SERV_N = 0x2E + UNDOC_SERV_O = 0x2F + + UNDOC_IDEN = 0x30 + UNDOC_IDEN_A = 0x31 + UNDOC_IDEN_B = 0x32 + UNDOC_IDEN_C = 0x33 + UNDOC_IDEN_D = 0x34 + UNDOC_IDEN_E = 0x35 + UNDOC_IDEN_F = 0x36 + UNDOC_IDEN_G = 0x37 + UNDOC_IDEN_H = 0x38 + UNDOC_IDEN_I = 0x39 + UNDOC_IDEN_J = 0x3A + UNDOC_IDEN_K = 0x3B + UNDOC_IDEN_L = 0x3C + UNDOC_IDEN_M = 0x3D + UNDOC_IDEN_N = 0x3E + UNDOC_IDEN_O = 0x3F + + MISS_SERV = 0x40 + MISS_SERV_A = 0x41 + MISS_SERV_B = 0x42 + MISS_SERV_C = 0x43 + MISS_SERV_D = 0x44 + MISS_SERV_E = 0x45 + MISS_SERV_F = 0x46 + MISS_SERV_G = 0x47 + MISS_SERV_H = 0x48 + MISS_SERV_I = 0x49 + MISS_SERV_J = 0x4A + MISS_SERV_K = 0x4B + MISS_SERV_L = 0x4C + MISS_SERV_M = 0x4D + MISS_SERV_N = 0x4E + MISS_SERV_O = 0x4F + + MISS_IDEN = 0x50 + MISS_IDEN_A = 0x51 + MISS_IDEN_B = 0x52 + MISS_IDEN_C = 0x53 + MISS_IDEN_D = 0x54 + MISS_IDEN_E = 0x55 + MISS_IDEN_F = 0x56 + MISS_IDEN_G = 0x57 + MISS_IDEN_H = 0x58 + MISS_IDEN_I = 0x59 + MISS_IDEN_J = 0x5A + MISS_IDEN_K = 0x5B + MISS_IDEN_L = 0x5C + MISS_IDEN_M = 0x5D + MISS_IDEN_N = 0x5E + MISS_IDEN_O = 0x5F + + DFT_RES = 0x70 + DFT_RES_A = 0x71 + DFT_RES_B = 0x72 + DFT_RES_C = 0x73 + DFT_RES_D = 0x74 + DFT_RES_E = 0x75 + DFT_RES_F = 0x76 + DFT_RES_G = 0x77 + DFT_RES_H = 0x78 + DFT_RES_I = 0x79 + DFT_RES_J = 0x7A + DFT_RES_K = 0x7B + DFT_RES_L = 0x7C + DFT_RES_M = 0x7D + DFT_RES_N = 0x7E + DFT_RES_O = 0x7F diff --git a/src/gallia/analyzer/iso_def.py b/src/gallia/analyzer/iso_def.py new file mode 100644 index 000000000..cb661cfd9 --- /dev/null +++ b/src/gallia/analyzer/iso_def.py @@ -0,0 +1,6 @@ +""" +gallia-analyze ISO DEF module +""" +ISO_ERR_NOT_SUPP = [0x11, 0x12, 0x7E, 0x7F] +ISO_ERR_FOR_ALL = [0x10, 0x11, 0x12, 0x7E, 0x7F] +ISO_SERV_BY_ID = [0x22, 0x24, 0x27, 0x2A, 0x2C, 0x2E, 0x2F, 0x31] diff --git a/src/gallia/analyzer/json/conditions.json b/src/gallia/analyzer/json/conditions.json new file mode 100644 index 000000000..229773437 --- /dev/null +++ b/src/gallia/analyzer/json/conditions.json @@ -0,0 +1,36 @@ +[ + {"SCAN MODE":"scan-service", "FAILURE":"UNDOC_SERV", "RESPONDED":[128, 160]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV", "RESPONDED":[0]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_A", "NOT KNOWN":["service"], "RESPONDED":["serviceNotSupported"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_B", "NOT KNOWN":["service"], "RESPONDED":["NO_RESPONSE"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_C", "NOT 
SUPPORTED":["service"], "RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_D", "NOT SUPPORTED":["service"], "RESPONDED":["NO_RESPONSE"]}, + {"SCAN MODE":"scan-service", "FAILURE":"UNDOC_SERV_A", "NOT SUPPORTED":["service"], "NOT RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession", "NO_RESPONSE"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_E", "SUPPORTED":["service"], "NOT SUPPORTED":["session"], "RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_F", "SUPPORTED":["service", "session", "response"], "NOT RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_G", "SUPPORTED":["service", "session"], "NOT RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"OK_SERV_H", "SUPPORTED":["service", "session"], "RESPONDED":["subFunctionNotSupported"]}, + {"SCAN MODE":"scan-service", "FAILURE":"UNDOC_SERV_B", "SUPPORTED":["service"], "NOT SUPPORTED":["session"], "NOT RESPONDED":["serviceNotSupported", "subFunctionNotSupported", "subFunctionNotSupportedInActiveSession", "serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"MISS_SERV_A", "SUPPORTED":["service", "session"], "RESPONDED":["serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-service", "FAILURE":"MISS_SERV_B", "SUPPORTED":["service", "session"], "RESPONDED":["serviceNotSupported"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_A", "NOT SUPPORTED":["service"], "RESPONDED":["serviceNotSupported"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_B", "NOT SUPPORTED":["subfunc"], "RESPONDED":["subFunctionNotSupported"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"MISS_IDEN_A", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["serviceNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"MISS_IDEN_B", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["serviceNotSupported"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"MISS_IDEN_C", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["requestOutOfRange"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"MISS_IDEN_D", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "FOR SERVICE":["WriteDataByIdentifier"], "RESPONDED":["securityAccessDenied"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_C", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "SUPPORTED":["response"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_D", "MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":[0]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_E", "MATCH":["session", "boot", "service", "subfunc", "identifier"], "FOR SERVICE":["ReadDataByIdentifier"], "RESPONDED":["conditionsNotCorrect"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_F", "MATCH":["session", "boot", "service", 
"subfunc", "identifier"], "FOR SERVICE":["RoutineControl"], "RESPONDED":["conditionsNotCorrect"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"OK_IDEN_G", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["requestOutOfRange"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"DFT_RES_A", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "FOR SERVICE":["ReadDataByIdentifier"], "RESPONDED":["incorrectMessageLengthOrInvalidFormat"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"DFT_RES_B", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "FOR SERVICE":["SecurityAccess"], "RESPONDED":["subFunctionNotSupported", "serviceNotSupportedInActiveSession", "subFunctionNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"DFT_RES_C", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "FOR SERVICE":["RoutineControl"], "RESPONDED":["subFunctionNotSupported"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"DFT_RES_D", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "FOR SERVICE":["WriteDataByIdentifier"], "RESPONDED":["securityAccessDenied", "incorrectMessageLengthOrInvalidFormat", "serviceNotSupportedInActiveSession", "conditionsNotCorrect"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"UNDOC_IDEN_A", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["conditionsNotCorrect"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"UNDOC_IDEN_B", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":["subFunctionNotSupportedInActiveSession"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"UNDOC_IDEN_C", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "SUPPORTED":["response"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"UNDOC_IDEN_D", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "NOT SUPPORTED":["response"]}, + {"SCAN MODE":"scan-identifier", "FAILURE":"UNDOC_IDEN_E", "NOT MATCH":["session", "boot", "service", "subfunc", "identifier", "ecu_mode"], "RESPONDED":[0]} +] \ No newline at end of file diff --git a/src/gallia/analyzer/json/responses.json b/src/gallia/analyzer/json/responses.json new file mode 100644 index 000000000..87fd81b5f --- /dev/null +++ b/src/gallia/analyzer/json/responses.json @@ -0,0 +1,63 @@ +[ +{"response_name":"NO_RESPONSE", "response": -1, "rgb":"FFFFFF"}, +{"response_name":"POSITIVE_RESPONSE", "response": 0, "rgb":"FFFFFF"}, +{"response_name":"generalReject", "response":16, "rgb":"F5DEB3"}, +{"response_name":"serviceNotSupported", "response":17, "rgb":"800000"}, +{"response_name":"subFunctionNotSupported", "response":18, "rgb":"DC143C"}, +{"response_name":"incorrectMessageLengthOrInvalidFormat", "response":19, "rgb":"FFDEAD"}, +{"response_name":"responseTooLong", "response":20, "rgb":"FFFF00"}, +{"response_name":"busyRepeatRequest", "response":33, "rgb":"00CED1"}, +{"response_name":"conditionsNotCorrect", "response":34, "rgb":"5F9EA0"}, +{"response_name":"requestSequenceError", "response":36, "rgb":"00FFFF"}, +{"response_name":"noResponseFromSubnetComponent", "response":37, "rgb":"7FFFD4"}, +{"response_name":"failurePreventsExecutionOfRequestedAction", "response":38, "rgb":"6495ED"}, +{"response_name":"requestOutOfRange", "response":49, "rgb":"808000"}, +{"response_name":"securityAccessDenied", "response":51, "rgb":"9ACD32"}, 
+{"response_name":"authenticationRequired", "response":52, "rgb":"7CFC00"}, +{"response_name":"invalidKey", "response":53, "rgb":"228B22"}, +{"response_name":"exceededNumberOfAttempts", "response":54, "rgb":"00FA9A"}, +{"response_name":"requiredTimeDelayNotExpired", "response":55, "rgb":"2E8B57"}, +{"response_name":"secureDataTransmissionRequired", "response":56, "rgb":"3CB371"}, +{"response_name":"secureDataTransmissionNotAllowed", "response":57, "rgb":"008B8B"}, +{"response_name":"secureDataVerificationFailed", "response":58, "rgb":"32CD32"}, +{"response_name":"certificateVerificationFailedInvalidTimePeriod", "response":80, "rgb":"4B0082"}, +{"response_name":"certificateVerificationFailedInvalidSignature", "response":81, "rgb":"9370DB"}, +{"response_name":"certificateVerificationFailedInvalidChainOfTrust", "response":82, "rgb":"800080"}, +{"response_name":"certificateVerificationFailedInvalidType", "response":83, "rgb":"BA55D3"}, +{"response_name":"certificateVerificationFailedInvalidFormat", "response":84, "rgb":"7B68EE"}, +{"response_name":"certificateVerificationFailedInvalidContent", "response":85, "rgb":"9400D3"}, +{"response_name":"certificateVerificationFailedInvalidScope", "response":86, "rgb":"D8BFD8"}, +{"response_name":"certificateVerificationFailedInvalidCertificateRevoked", "response":87, "rgb":"000080"}, +{"response_name":"ownershipVerificationFailed", "response":88, "rgb":"696969"}, +{"response_name":"challengeCalculationFailed", "response":89, "rgb":"808080"}, +{"response_name":"settingAccessRightsFailed", "response":90, "rgb":"A9A9A9"}, +{"response_name":"sessionKeyCreationOrDerivationFailed", "response":91, "rgb":"C0C0C0"}, +{"response_name":"configurationDataUsageFailed", "response":92, "rgb":"D3D3D3"}, +{"response_name":"deAuthenticationFailed", "response":93, "rgb":"B0C4DE"}, +{"response_name":"uploadDownloadNotAccepted", "response":112, "rgb":"FF4500"}, +{"response_name":"transferDataSuspended", "response":113, "rgb":"FF8C00"}, +{"response_name":"generalProgrammingFailure", "response":114, "rgb":"B8860B"}, +{"response_name":"wrongBlockSequenceCounter", "response":115, "rgb":"DAA520"}, +{"response_name":"requestCorrectlyReceivedResponsePending", "response":120, "rgb":"D2691E"}, +{"response_name":"subFunctionNotSupportedInActiveSession", "response":126, "rgb":"FF7F50"}, +{"response_name":"serviceNotSupportedInActiveSession", "response":127, "rgb":"CD5C5C"}, +{"response_name":"rpmTooHigh", "response":129, "rgb":"FFFACD"}, +{"response_name":"rpmTooLow", "response":130, "rgb":"FFFACD"}, +{"response_name":"engineIsRunning", "response":131, "rgb":"F5F5DC"}, +{"response_name":"engineIsNotRunning", "response":132, "rgb":"F5F5DC"}, +{"response_name":"engineRunTimeTooLow", "response":133, "rgb":"F4A460"}, +{"response_name":"temperatureTooHigh", "response":134, "rgb":"CD853F"}, +{"response_name":"temperatureTooLow", "response":135, "rgb":"CD853F"}, +{"response_name":"vehicleSpeedTooHigh", "response":136, "rgb":"FF6347"}, +{"response_name":"vehicleSpeedTooLow", "response":137, "rgb":"FF6347"}, +{"response_name":"throttlePedalTooHigh", "response":138, "rgb":"F08080"}, +{"response_name":"throttlePedalTooLow", "response":139, "rgb":"F08080"}, +{"response_name":"transmissionRangeNotInNeutral", "response":140, "rgb":"E9967A"}, +{"response_name":"transmissionRangeNotInGear", "response":141, "rgb":"E9967A"}, +{"response_name":"brakeSwitchNotClosed", "response":143, "rgb":"FF4500"}, +{"response_name":"shifterLeverNotInPark", "response":144, "rgb":"DDA0DD"}, 
+{"response_name":"torqueConverterClutchLocked", "response":145, "rgb":"DB7093"}, +{"response_name":"voltageTooHigh", "response":146, "rgb":"FF69B4"}, +{"response_name":"voltageTooLow", "response":147, "rgb":"FFB6C1"}, +{"response_name":"resourceTemporarilyNotAvailable", "response":148, "rgb":"C71585"} +] \ No newline at end of file diff --git a/src/gallia/analyzer/json/uds_iso_standard.json b/src/gallia/analyzer/json/uds_iso_standard.json new file mode 100644 index 000000000..5df08c532 --- /dev/null +++ b/src/gallia/analyzer/json/uds_iso_standard.json @@ -0,0 +1,28 @@ +[ +{"service":16,"service_name":"DiagnosticSessionControl","session":[1,2,3,4],"subfunc":[1,2,3,4],"response":[18,19,34]}, +{"service":17,"service_name":"ECUReset","session":[1,2,3,4],"subfunc":[1,2,3,4,5],"response":[18,19,34,51]}, +{"service":39,"service_name":"SecurityAccess","session":[2,3,4],"subfunc":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,95,96],"response":[18,19,34,36,49,53,54,55]}, +{"service":40,"service_name":"CommunicationControl","session":[2,3,4],"subfunc":[0,1,2,3,4,5],"response":[18,19,34,49]}, +{"service":41,"service_name":"Authentification","session":[1,2,3,4],"subfunc":[0,1,2,3,4,5,6,7,8],"response":[18,19,34,36]}, +{"service":62,"service_name":"TesterPresent","session":[1,2,3,4],"subfunc":[0],"response":[18,19]}, +{"service":133,"service_name":"ControlDTCSetting","session":[2,3,4],"subfunc":[0,1,2],"response":[18,19,34,49]}, +{"service":134,"service_name":"ResponseOnEvent","session":[1,2,3,4],"subfunc":[1,2,3,4,5,6,7,8,9],"response":[18,19,34,49]}, +{"service":135,"service_name":"LinkControl","session":[2,3,4],"subfunc":[1,2,3],"response":[18,19,34,36,49]}, +{"service":34,"service_name":"ReadDataByIdentifier","session":[1,2,3,4],"subfunc":[-1],"response":[18,19,34,49,51]}, +{"service":35,"service_name":"ReadMemoryByAddress","session":[1,2,3,4],"subfunc":[-1],"response":[18,19,34,49,51]}, +{"service":36,"service_name":"ReadScalingDataByIdentifier","session":[1,2,3,4],"subfunc":[-1],"response":[19,34,49,51,52]}, +{"service":42,"service_name":"ReadDataByPeriodicIdentifier","session":[2,3,4],"subfunc":[1,2,3,4],"response":[19,34,49,51]}, +{"service":44,"service_name":"DynamicallyDefineDataIdentifier","session":[1,2,3,4],"subfunc":[1,2,3],"response":[18,19,34,49,51]}, +{"service":46,"service_name":"WriteDataByIdentifier","session":[1,2,3,4],"subfunc":[-1],"response":[19,34,49,51,114]}, +{"service":61,"service_name":"WriteMemoryByAddress","session":[1,2,3,4],"subfunc":[-1],"response":[19,34,49,51,52,114]}, +{"service":20,"service_name":"ClearDiagnosticInformation","session":[1,2,3,4],"subfunc":[-1],"response":[19,34,49,114]}, +{"service":25,"service_name":"ReadDTCInformation","session":[1,2,3,4],"subfunc":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,21,22,23,24,25,26,66,85,86],"response":[18,19,49]}, +{"service":47,"service_name":"InputOutputControlByIdentifier","session":[2,3,4],"subfunc":[-1],"response":[19,34,49,51,52]}, +{"service":49,"service_name":"RoutineControl","session":[1,2,3,4],"subfunc":[1,2,3],"response":[18,19,34,36,49,51,114]}, +{"service":52,"service_name":"RequestDownload","session":[2,3,4],"subfunc":[-1],"response":[19,34,49,51,52,112]}, +{"service":53,"service_name":"RequestUpload","session":[2,3,4],"subfunc":[-1],"response":[19,34,49,51,52,112]}, 
+{"service":54,"service_name":"TransferData","session":[2,3,4],"subfunc":[-1],"response":[19,36,49,113,114,115,146,147]}, +{"service":55,"service_name":"RequestTransferExit","session":[2,3,4],"subfunc":[-1],"response":[19,36,49,114]}, +{"service":56,"service_name":"RequestFileTransfer","session":[2,3,4],"subfunc":[1,2,3,4,5,6],"response":[19,34,36,49,51,52,112]}, +{"service":132,"service_name":"SecuredDataTransmission","session":[2,3,4],"subfunc":[-1],"response":[19,58]} +] \ No newline at end of file diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py new file mode 100755 index 000000000..d8e6fe157 --- /dev/null +++ b/src/gallia/analyzer/main.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +""" +gallia-analyze main script +""" +import sys +import time +import argparse +import textwrap +import numpy as np +from gallia.analyzer.operator import Operator +from gallia.analyzer.analyzer import Analyzer +from gallia.analyzer.extractor import Extractor +from gallia.analyzer.reporter import Reporter +from gallia.analyzer.categorizer import Categorizer +from gallia.analyzer.time_analyzer import TimeAnalyzer +from gallia.analyzer.mode_config import LogMode +from gallia.analyzer.arg_help import ArgHelp + +# ========================================================== # +# [Rule for arguments] +# +# Command: one letter lowercase +# Functional Option: one letter uppercase +# Report Option: one word starting with uppercase +# Parameter: one word lowercase(sometimes with dash) +# ========================================================== # + +parser = argparse.ArgumentParser(usage=textwrap.dedent(ArgHelp.usage)) + +# Commands +grp_cmd = parser.add_argument_group("Command") +grp_cmd.add_argument("-a", action="store_true", help=ArgHelp.analyze) +grp_cmd.add_argument("-c", action="store_true", help=ArgHelp.clear) +grp_cmd.add_argument("-e", action="store_true", help=ArgHelp.extract) +grp_cmd.add_argument("-i", action="store_true", help=ArgHelp.aio_iden) +grp_cmd.add_argument("-r", action="store_true", help=ArgHelp.report) +grp_cmd.add_argument("-s", action="store_true", help=ArgHelp.aio_serv) +grp_cmd.add_argument("-t", action="store_true", help=ArgHelp.time) + +# Options +grp_opt = parser.add_argument_group("Option") +grp_opt.add_argument("-A", action="store_true", help=ArgHelp.all_serv) +grp_opt.add_argument("-D", action="store_true", help=ArgHelp.debug) +grp_opt.add_argument("-I", action="store_true", help=ArgHelp.iso) +grp_opt.add_argument("-L", action="store_true", help=ArgHelp.log) +grp_opt.add_argument("-P", action="store_true", help=ArgHelp.possible) +grp_opt.add_argument("-C", action="store_true", help=ArgHelp.cat) + +# Parameters +grp_param = parser.add_argument_group("Parameter") +grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1) +grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0) +grp_param.add_argument("--to", type=int, help=ArgHelp.last, default=0) +grp_param.add_argument("--output", type=str, help=ArgHelp.output, default="") +grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="") +grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0) + +args = vars(parser.parse_args()) + +# Commands +analyze_on = args["a"] +clear_on = args["c"] +extract_on = args["e"] +aio_identifier_on = args["i"] +report_on = args["r"] +aio_service_on = args["s"] +t_analyze_on = args["t"] + +# Functional Options +all_services_on = args["A"] +debug_on = args["D"] +iso_on = args["I"] +log_file_on = args["L"] 
+show_possible_on = args["P"] +categorizer_on = args["C"] + +# Parameters +service_id = args["sid"] +db_path = args["source"] +run_start = args["from"] +run_end = args["to"] + 1 +file_path = args["output"] +t_prec = args["precision"] + +if run_end <= run_start: + run_end = run_start + 1 + +if db_path == "": + print("Please set database path with --source option!") + sys.exit() + + +def main() -> None: + """ + gallia-analyze: command line mode main function + """ + start_time = time.process_time() + + if log_file_on: + log_mode = LogMode.LOG_FILE + else: + log_mode = LogMode.STD_OUT + + if run_start == 0 and run_end == 1: + operator = Operator(db_path) + runs_vec = operator.get_runs() + else: + runs_vec = np.arange(run_start, run_end) + + if clear_on or extract_on: + extractor = Extractor(db_path, log_mode) + + if clear_on: + extractor.clear() + + if extract_on: + extractor.extract(runs_vec) + + if analyze_on: + if categorizer_on: + analyzer = Categorizer(db_path, log_mode) + else: + analyzer = Analyzer(db_path, log_mode, debug_on) + an_opt = analyzer.get_op_mode(iso_on) + analyzer.analyze(runs_vec, an_opt) + + if t_analyze_on: + if t_prec > 0: + time_analyzer = TimeAnalyzer(db_path, t_prec, log_mode) + else: + time_analyzer = TimeAnalyzer(db_path, log_mode=log_mode) + time_analyzer.extract_tra(runs_vec) + time_analyzer.hist_tra(runs_vec) + time_analyzer.plot_tra(runs_vec) + + if report_on or aio_service_on or aio_identifier_on: + reporter = Reporter(db_path, log_mode) + + if report_on: + reporter.report_xl(runs_vec, show_possible_on, file_path) + + if aio_service_on: + reporter.consolidate_xl_serv(file_path, show_possible_on) + + if aio_identifier_on: + if all_services_on: + reporter.iterate_all(file_path, show_possible_on) + else: + if service_id == -1: + print("Please input Service ID with --sid option.") + else: + reporter.consolidate_xl_iden(service_id, file_path, show_possible_on) + + print(f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}") + + +if __name__ == "__main__": + main() diff --git a/src/gallia/analyzer/mode_config.py b/src/gallia/analyzer/mode_config.py new file mode 100644 index 000000000..e0754beb5 --- /dev/null +++ b/src/gallia/analyzer/mode_config.py @@ -0,0 +1,37 @@ +""" +gallia-analyze Mode Config module +""" + +from enum import IntEnum + +if __name__ == "__main__": + exit() + + +class ScanMode(IntEnum): + """ + enum class for scan mode + """ + + SERV = 0x01 + IDEN = 0x02 + UNKNOWN = 0x00 + + +class OpMode(IntEnum): + """ + enum class for analysis mode + """ + + VEN_SPEC = 0x01 + ISO = 0x02 + + +class LogMode(IntEnum): + """ + enum class for log mode + """ + + STD_OUT = 0x01 + LOG_FILE = 0x02 + DUBUG = 0x03 diff --git a/src/gallia/analyzer/name_config.py b/src/gallia/analyzer/name_config.py new file mode 100644 index 000000000..d38860b3a --- /dev/null +++ b/src/gallia/analyzer/name_config.py @@ -0,0 +1,129 @@ +""" +gallia-analyze Name Config module +""" +if __name__ == "__main__": + exit() + +NEG_STR = "NOT " + + +class TblNm: + """ + class for relational table names in the database + """ + + scan_result = "scan_result" + scan_run = "scan_run" + run_meta = "run_meta" + serv = "analysis_service" + iden = "analysis_identifier" + ven_lu = "vendor_lookup" + ven_sess = "vendor_session" + ref_resp = "analysis_ref_response" + ref_sess = "analysis_ref_session" + ref_sbfn = "analysis_ref_subfunc" + meta = "analysis_meta" + + +class ColNm: + """ + class for colunm names in relational tables + """ + + run = "run" + run_id = "run_id" + index = "index" 
+ sess = "session" + sess_name = "session_name" + state = "state" + serv = "service" + serv_name = "service_name" + sbfn = "subfunc" + iden = "identifier" + fail = "failure" + resp = "response" + resp_name = "response_name" + scan_mode = "scan_mode" + mode = "mode" + ecu_mode = "ecu_mode" + boot = "boot" + combi = "combi" + dft = "default" + id = "id" + t_rqst = "request_time" + t_resp = "response_time" + t_react = "reaction_time" + prefix = "$_" + infix = "_" + ecu_mode = "ecu_mode" + is_err = "is_error" + + +class VwNm: + """ + class for view names in database + """ + + ecu_vw = "ecu_view" + mode_vw = "mode_view" + ven_ref_vw = "vendor_ref_view" + ven_ref_sep_vw = "vendor_ref_sep_view" + resp_vw = "res_view" + ref_vw = "ref_view" + serv_oi = "service_of_interest" + sess_alwd = "session_allowed" + sbfn_alwd = "subfunc_allowed" + iden_alwd = "identifier_allowed" + resp_alwd = "response_allowed" + + +class KyNm: + """ + class for JSON key names + """ + + err = "response" + err_name = "response_name" + sess = "session" + sess_name = "session_name" + serv = "service" + serv_name = "service_name" + resp = "response" + rgb = "rgb" + sbfn = "subfunc" + mode = "mode" + + # key names for conditions + scan_serv = "scan-service" + scan_iden = "scan-identifier" + scan_mode = "SCAN MODE" + fail = "FAILURE" + resd = "RESPONDED" + match = "MATCH" + supp = "SUPPORTED" + known = "KNOWN" + for_serv = "FOR SERVICE" + + +class ShtNm: + """ + class for EXCEL sheet names + """ + + init = "Sheet" + sum = "Summary" + undoc = "IDs Undocumented" + miss = "IDs Missing" + + +class CellCnt: + """ + class for EXCEL cell texts + """ + + default = "Default" + serv = "Service ID" + no_ent = "NO_ENTRY" + sbfn = "Subfunc" + sess_unscn = "[Session not scanned]" + sess_undoc = "[Session undocumented]" diff --git a/src/gallia/analyzer/naming_conventions.txt b/src/gallia/analyzer/naming_conventions.txt new file mode 100644 index 000000000..57f251b08 --- /dev/null +++ b/src/gallia/analyzer/naming_conventions.txt @@ -0,0 +1,53 @@ + +# ========================================================== # +# +# [Data type naming conventions] +# +# +# _ls: python list +# _dict: python dictionary +# _sql: SQL string +# _vec: numpy n-dimentional array +# _ser: pandas Series +# _df: pandas Data Frame +# _cond: partial SQL string for condition +# _on: python boolean +# +# +# cond_: pandas Series of conditional expressions +# +# [Abbreviations] +# +# abn: abnormal +# aem: all ECU modes +# aio: all-in-one +# alwd: allowed +# col: column +# cond: condition +# cur: current +# err: error +# iden: identifier +# lu: lookup +# neg: negative +# nm: name +# oi: of interest +# op: operation +# pos: positive +# prec: precision +# psb: possible +# resp: response +# sbfn: sub-function +# serv: service +# sess: session +# sht: sheet +# sid: service id +# src: source +# supp: supported +# tbl: table +# tra: reaction time +# trg: target +# ven: vendor +# wi: with +# wo: without +# +# ========================================================== # diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py new file mode 100644 index 000000000..17d5fd52a --- /dev/null +++ b/src/gallia/analyzer/operator.py @@ -0,0 +1,581 @@ +""" +gallia-analyze Operator module +""" +import json +from json.decoder import JSONDecodeError +from typing import Tuple +from sqlite3 import OperationalError +from itertools import chain +import numpy as np +import pandas as pd +from pandas.core.indexing import IndexingError +from gallia.analyzer.db_handler import 
+from gallia.analyzer.config import TblStruct, SrcPath, MiscError, NUM_ECU_MODES
+from gallia.analyzer.iso_def import ISO_ERR_FOR_ALL, ISO_ERR_NOT_SUPP, ISO_SERV_BY_ID
+from gallia.analyzer.failure import Failure
+from gallia.analyzer.name_config import ColNm, TblNm, VwNm
+from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode
+from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException
+from gallia.analyzer.constants import UDSIsoSessions
+from gallia.uds.core.constants import UDSErrorCodes, UDSIsoServices
+
+
+if __name__ == "__main__":
+    exit()
+
+
+class Operator(DatabaseHandler):
+    """
+    Class for common basic operations and utilities such as loading the meta data of runs,
+    loading reference dictionaries and getting other information from a certain run in the database.
+    """
+
+    def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT):
+        DatabaseHandler.__init__(self, path, log_mode)
+        self.msg_head = "[Operator] "
+        self.num_modes = 0
+        self.run_meta_df = pd.DataFrame()
+        self.lu_iden_df = pd.DataFrame()
+        self.ref_ven_df = pd.DataFrame()
+        self.ref_iso_df = pd.DataFrame()
+        self.supp_serv_ven_vec = np.array([])
+        self.supp_serv_iso_vec = np.array([])
+        self.sess_code_vec = np.array([])
+        self.sess_code_dict = dict()
+        self.sess_name_dict = dict()
+        self.load_all_dicts()
+        if self.connect_db():
+            self.load_ref_iso()
+
+    def check_df(self, raw_df: pd.DataFrame, cols_dict: dict) -> bool:
+        """
+        check if a data frame matches the given table structure.
+        may raise EmptyTableException or ColumnMismatchException.
+        """
+        if raw_df.shape == (0, 0):
+            raise EmptyTableException
+        for col in cols_dict.keys():
+            if col not in raw_df.columns:
+                raise ColumnMismatchException
+        return True
+
+    def get_runs(self) -> np.ndarray:
+        """
+        get a numpy array of all runs in the database.
+        """
+        if self.load_meta(force=True):
+            return np.array(self.run_meta_df.index)
+        return np.array([])
+
+    def get_scan_mode(self, run: int) -> ScanMode:
+        """
+        get the scan mode of a run in the database.
+        """
+        if not self.load_meta():
+            return ScanMode.UNKNOWN
+        try:
+            scan_mode_str = self.run_meta_df.loc[run, ColNm.scan_mode]
+            if scan_mode_str == "scan-services":
+                return ScanMode.SERV
+            if scan_mode_str == "scan-identifiers":
+                return ScanMode.IDEN
+        except (KeyError, IndexingError, AttributeError) as exc:
+            self.log("getting scan mode failed", True, exc)
+            return ScanMode.UNKNOWN
+        return ScanMode.UNKNOWN
+
+    def get_sid(self, run: int) -> int:
+        """
+        get the Service ID of a given scan_identifier run.
+        """
+        try:
+            if self.get_scan_mode(run) != ScanMode.IDEN:
+                self.log("scan mode is not scan_identifier.", True)
+                return -1
+            raw_df = self.read_run_db(TblNm.iden, run)
+            self.check_df(raw_df, TblStruct.iden)
+            serv_vec = pd.unique(raw_df[ColNm.serv])
+            if serv_vec.shape[0] > 1:
+                self.log("A run has more than one Service ID.", True)
+            serv_ser = raw_df[ColNm.serv].mode()
+            if serv_ser.shape[0] > 1:
+                self.log("A run has more than one most frequent Service ID.", True)
+        except (
+            KeyError,
+            IndexingError,
+            AttributeError,
+            EmptyTableException,
+            ColumnMismatchException,
+        ) as exc:
+            self.log("getting Service ID failed", True, exc)
+            return -1
+        return serv_ser[0]
+
+    def get_ecu_mode(self, run: int) -> int:
+        """
+        get the ECU mode of a run in the database.
+ """ + if not self.load_meta(): + return -1 + try: + ecu_mode = self.run_meta_df.loc[run, ColNm.ecu_mode] + return ecu_mode + except (KeyError, IndexingError, AttributeError) as exc: + self.log("getting ECU mode failed", True, exc) + return -1 + + def get_op_mode(self, iso_on: bool) -> OpMode: + """ + get analysis mode. Input of True returns vendor-specific analysis mode. + """ + if iso_on: + an_opt = OpMode.ISO + else: + an_opt = OpMode.VEN_SPEC + return an_opt + + def get_sess_lu(self) -> np.ndarray: + """ + get a vector of diagnostic sessions that are definded in vendor lookup table. + """ + try: + lu_df = self.read_db(TblNm.ven_lu) + self.check_df(lu_df, TblStruct.ven_lu) + sess_vec = pd.unique(lu_df[ColNm.sess]) + except ( + KeyError, + IndexingError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.log("getting sessions in lookup table failed", True, exc) + return np.array([]) + return sess_vec + + def get_ref_df_from_json(self, path: str) -> pd.DataFrame: + """ + get reference summary from JSON file. + """ + try: + with open(path, encoding="utf8") as source_json: + serv_ls = json.load(source_json) + ref_df = pd.DataFrame() + for serv in serv_ls: + ser = pd.Series(serv) + ref_df = pd.concat([ref_df, ser], axis=1) + ref_df = ref_df.T + ref_df.loc[:, ColNm.serv] = ref_df.loc[:, ColNm.serv].astype("int64") + ref_df.sort_values(ColNm.serv) + ref_df = ref_df.set_index(ColNm.serv) + except ( + KeyError, + IndexingError, + AttributeError, + FileNotFoundError, + JSONDecodeError, + ) as exc: + self.log("getting reference summary from JSON failed", True, exc) + return pd.DataFrame() + return ref_df + + def get_dft_err_df(self, run: int) -> Tuple[pd.DataFrame, np.ndarray]: + """ + get data frame that shows most common error(default error) + for each diagnostic session regarding a run. + """ + try: + scan_mode = self.get_scan_mode(run) + if scan_mode == ScanMode.SERV: + raw_df = self.read_run_db(TblNm.serv, run) + self.check_df(raw_df, TblStruct.serv) + else: + raw_df = self.read_run_db(TblNm.iden, run) + self.check_df(raw_df, TblStruct.iden) + except (EmptyTableException, ColumnMismatchException) as exc: + self.log("getting default error data frame failed", True, exc) + return pd.DataFrame() + return self.get_dft_err_df_from_raw(raw_df) + + def get_dft_err_df_from_raw(self, raw_df: pd.DataFrame) -> pd.DataFrame: + """ + get summarized data frame that shows most common error(default error) + for each diagnostic session from raw data frame. + """ + try: + sess_vec = pd.unique(raw_df[ColNm.sess]) + dft_err_df = pd.DataFrame([], index=[ColNm.dft], columns=sess_vec) + for sess in sess_vec: + cond = raw_df[ColNm.sess] == sess + dft_err_df.loc[ColNm.dft, sess] = raw_df.loc[cond, ColNm.resp].mode()[0] + dft_err_df.attrs[ColNm.serv] = list(pd.unique(raw_df[ColNm.serv])) + except ( + KeyError, + IndexingError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.log("getting default error data frame failed", True, exc) + return pd.DataFrame() + return dft_err_df + + def get_pos_res(self, search_id: int) -> str: + """ + get positive response from data table with a scan entry ID. 
+ """ + try: + res_sql = f""" + SELECT json_extract("response_data", '$.data_records[0]') + FROM "{TblNm.scan_result}" WHERE "{ColNm.id}" = {str(search_id)}; + """ + res_df = self.get_df_by_query(res_sql) + resp = res_df.iloc[0, 0] + except (KeyError, IndexingError, AttributeError) as exc: + self.log("getting positive response failed", True, exc) + return "" + return resp + + def load_meta(self, force: bool = False) -> bool: + """ + load meta data of all runs in the database. + """ + if force: + pass + elif self.run_meta_df.shape != (0, 0): + return True + gen_meta_sql = f""" + DROP VIEW IF EXISTS "{VwNm.ecu_vw}"; + DROP VIEW IF EXISTS "{VwNm.mode_vw}"; + CREATE VIEW "{VwNm.ecu_vw}" + AS SELECT "{ColNm.id}", json_extract("properties_pre", "$.mode") AS "{ColNm.ecu_mode}" + FROM "{TblNm.scan_run}"; + CREATE VIEW "{VwNm.mode_vw}" + AS SELECT "{ColNm.id}" AS "{ColNm.run_id}", + "script" AS "{ColNm.scan_mode}" + FROM "{TblNm.run_meta}"; + DROP TABLE IF EXISTS "{TblNm.meta}"; + CREATE TABLE "{TblNm.meta}" + AS SELECT "{ColNm.run_id}", "{ColNm.ecu_mode}", "{ColNm.scan_mode}" + FROM "{VwNm.ecu_vw}" + INNER JOIN "{VwNm.mode_vw}" + ON "{VwNm.ecu_vw}"."{ColNm.id}" = "{VwNm.mode_vw}"."{ColNm.run_id}"; + DROP VIEW IF EXISTS "{VwNm.ecu_vw}"; + DROP VIEW IF EXISTS "{VwNm.mode_vw}"; + """ + try: + self.cur.executescript(gen_meta_sql) + meta_df = self.read_db(TblNm.meta) + if meta_df.shape == (0, 0): + self.log("no meta data", True) + return False + meta_df.set_index("run_id", inplace=True) + self.run_meta_df = meta_df + except (KeyError, IndexingError, AttributeError, OperationalError) as exc: + self.log("loading run meta data failed", True, exc) + return False + return True + + def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bool: + """ + load reference summary for vendor-specific analysis from the database. + """ + if force: + pass + elif self.ref_ven_df.shape != (0, 0): + return True + try: + lu_df = self.read_db(TblNm.ven_lu) + self.check_df(lu_df, TblStruct.ven_lu) + supp_serv_vec = np.sort(pd.unique(lu_df[ColNm.serv])) + mode_vec = np.arange(num_modes) + ven_lu_dict = {} + self.num_modes = 0 + for mode in mode_vec: + loi_df = lu_df[lu_df[ColNm.ecu_mode] == mode].copy() + if loi_df.shape[0] == 0: + continue + else: + self.num_modes += 1 + ref_df = pd.DataFrame(columns=supp_serv_vec) + for serv in supp_serv_vec: + sess_ls = list( + np.sort( + pd.unique( + loi_df.loc[loi_df[ColNm.serv] == serv, ColNm.sess] + ) + ) + ) + sbfn_ls = list( + np.sort( + pd.unique( + loi_df.loc[loi_df[ColNm.serv] == serv, ColNm.sbfn] + ) + ) + ) + iden_ls = list( + np.sort( + pd.unique( + loi_df.loc[ + loi_df[ColNm.serv] == serv, + ColNm.iden, + ] + ) + ) + ) + ref_df[serv] = pd.Series( + [sess_ls, sbfn_ls, iden_ls], + index=[ColNm.sess, ColNm.sbfn, ColNm.iden], + ) + ven_lu_dict[mode] = ref_df.T + ven_lu_df = pd.concat(ven_lu_dict.values(), axis=1, keys=ven_lu_dict.keys()) + self.ref_ven_df: pd.DataFrame = ven_lu_df + self.supp_serv_ven_vec = np.sort(np.array(ven_lu_df.index)) + except ( + KeyError, + IndexingError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.log("loading vendor-specific reference failed", True, exc) + return False + return True + + def load_ref_iso(self, force: bool = False) -> bool: + """ + load reference summary for UDS ISO standard. 
+ """ + if force: + pass + elif self.ref_ven_df.shape != (0, 0): + return True + try: + ref_iso_df = self.get_ref_df_from_json(SrcPath.uds_iso_src) + self.supp_serv_iso_vec = np.sort(np.array(ref_iso_df.index)) + self.ref_iso_df: pd.DataFrame = ref_iso_df.sort_index() + except (KeyError, IndexingError, AttributeError) as exc: + self.log("loading reference summary for UDS ISO failed", True, exc) + return False + return True + + def load_all_dicts(self) -> bool: + """ + load necessary dictionaries for UDS ISO standard. + """ + self.iso_err_means_not_supp_vec = np.array(ISO_ERR_NOT_SUPP) + self.iso_supp_err_for_all_vec = np.array(ISO_ERR_FOR_ALL) + self.iso_serv_by_iden_vec = np.array(ISO_SERV_BY_ID) + self.iso_serv_name_dict = {serv.name: serv.value for serv in UDSIsoServices} + self.iso_serv_name_dict.update({"noService": -1}) + self.iso_serv_code_dict = dict( + (y, x) for x, y in self.iso_serv_name_dict.items() + ) + self.iso_serv_code_vec = np.array(list(self.iso_serv_name_dict.values())) + self.iso_err_name_dict = { + e.name: e.value for e in chain(UDSErrorCodes, MiscError) + } + self.iso_err_code_dict = dict((y, x) for x, y in self.iso_err_name_dict.items()) + self.iso_err_code_vec = np.array(list(self.iso_err_name_dict.values())) + self.iso_sess_name_dict = {sess.name: sess.value for sess in UDSIsoSessions} + self.iso_sess_code_dict = dict( + (y, x) for x, y in self.iso_sess_name_dict.items() + ) + self.fail_name_dict = {fail.name: fail.value for fail in Failure} + self.fail_code_dict = dict((y, x) for x, y in self.fail_name_dict.items()) + return True + + def load_ven_sess(self) -> bool: + try: + sess_df = self.read_db(TblNm.ven_sess) + self.check_df(sess_df, TblStruct.ven_sess) + sess_df = sess_df.set_index(ColNm.sess) + self.sess_code_dict = sess_df[ColNm.sess_name].to_dict(dict) + self.sess_name_dict = dict((y, x) for x, y in self.sess_code_dict.items()) + self.sess_code_vec = np.array(list(self.sess_code_dict.keys())) + except ( + KeyError, + IndexingError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.sess_name_dict = self.iso_sess_name_dict + self.sess_code_dict = self.iso_sess_code_dict + self.log("loading vendor-specific sessions failed", True, exc) + return False + return True + + def load_lu_iden(self, serv: int, ecu_mode: int) -> bool: + """ + load lookup reference of a certain service for scan_identifier analysis. + """ + if serv not in self.iso_serv_by_iden_vec: + return False + try: + raw_df = self.read_db(TblNm.ven_lu) + self.check_df(raw_df, TblStruct.ven_lu) + serv_df = raw_df[ + (raw_df[ColNm.serv] == serv) & (raw_df[ColNm.ecu_mode] == ecu_mode) + ].copy() + self.lu_iden_df = pd.DataFrame( + pd.unique( + list( + zip( + serv_df[ColNm.sess], + serv_df[ColNm.boot], + serv_df[ColNm.sbfn], + serv_df[ColNm.iden], + serv_df[ColNm.ecu_mode], + ) + ) + ), + columns=[ColNm.combi], + ) + except ( + EmptyTableException, + ColumnMismatchException, + KeyError, + AttributeError, + OperationalError, + ) as exc: + self.lu_iden_df = pd.DataFrame() + self.log(f"loading lookup for service 0x{serv:02x} failed", True, exc) + return False + return True + + def prepare_table(self) -> bool: + """ + prepare relational tables to save data for scan_service and scan_identifier. 
+ """ + if not self.create_table(TblNm.serv, TblStruct.serv): + self.log("preparing table for scan_service failed.", True) + return False + if not self.create_table(TblNm.iden, TblStruct.iden): + self.log("preparing table for scan_identifier failed.", True) + return False + return True + + def prepare_alwd_all( + self, ecu_mode: int = 0, op_mode: OpMode = OpMode.VEN_SPEC + ) -> bool: + """ + prepare reference relational tables for response, session and subfunctions. + """ + if op_mode == OpMode.ISO: + ref_df = self.ref_iso_df + if op_mode == OpMode.VEN_SPEC: + ref_df = self.ref_ven_df[ecu_mode] + if not self.prepare_alwd_res(): + return False + if not self.prepare_alwd_sess_boot(op_mode, ecu_mode): + return False + if not self.prepare_alwd( + TblNm.ref_sbfn, TblStruct.ref_sbfn, ColNm.sbfn, ref_df + ): + return False + return True + + def prepare_alwd_res(self) -> bool: + """ + prepare reference relational table for response. + """ + if not self.prepare_alwd( + TblNm.ref_resp, + TblStruct.ref_resp, + ColNm.resp, + self.ref_iso_df, + ): + return False + return True + + def prepare_alwd_sess_boot( + self, op_mode: OpMode = OpMode.VEN_SPEC, ecu_mode: int = 0 + ) -> bool: + """ + prepare reference relational table for session and boot. + """ + try: + if op_mode == OpMode.ISO: + return self.prepare_alwd( + TblNm.ref_sess, TblStruct.ref_sess, ColNm.sess, self.ref_iso_df + ) + if not self.create_table(TblNm.ref_sess, TblStruct.ref_sess): + return False + pair_ls = [] + ven_lu_df = self.read_db(TblNm.ven_lu) + self.check_df(ven_lu_df, TblStruct.ven_lu) + ven_lu_df = ven_lu_df[ven_lu_df[ColNm.ecu_mode] == ecu_mode] + ven_lu_df[ColNm.combi] = list( + zip(ven_lu_df[ColNm.serv], ven_lu_df[ColNm.sess], ven_lu_df[ColNm.boot]) + ) + entries_vec = pd.unique(ven_lu_df[ColNm.combi]) + for entry in entries_vec: + pair_ls.append((entry[0], entry[1], entry[2])) + pair_df = pd.DataFrame( + pair_ls, columns=[ColNm.serv, ColNm.sess, ColNm.boot] + ) + self.write_db(pair_df, TblNm.ref_sess) + except ( + KeyError, + IndexError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.log("preparing table for session and boot failed", True, exc) + return False + return True + + def prepare_alwd( + self, table_name: str, table_struct: dict, col_name: str, ref_df: pd.DataFrame + ) -> bool: + """ + prepare a relational table for available diagnotic sessions, sub-functions + or NRCs for Service IDs defined in UDS ISO Standard. + """ + try: + if not self.create_table(table_name, table_struct): + return False + pair_ls = [] + for serv in ref_df[col_name].index: + entries_ls = ref_df.loc[serv, col_name] + for entry in entries_ls: + pair_ls.append((serv, entry)) + if table_name == TblNm.ref_resp and col_name == ColNm.resp: + for entry in self.iso_supp_err_for_all_vec: + pair_ls.append((serv, entry)) + pair_df = pd.DataFrame(pair_ls, columns=[ColNm.serv, col_name]) + self.write_db(pair_df, table_name) + except (KeyError, IndexError, AttributeError) as exc: + self.log("preparing table for availabilities failed", True, exc) + return False + return True + + def clear(self) -> bool: + """ + clear all relational tables in the database. + """ + if not self.clear_alwd(): + return False + table_ls = [ + TblNm.serv, + TblNm.iden, + TblNm.meta, + ] + for table_name in table_ls: + if not self.delete_table(table_name): + return False + return True + + def clear_alwd(self) -> bool: + """ + clear relational tables for reference in the databse. 
+ """ + table_ls = [ + TblNm.ref_resp, + TblNm.ref_sess, + TblNm.ref_sbfn, + ] + for table_name in table_ls: + if not self.delete_table(table_name): + return False + return True diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py new file mode 100644 index 000000000..aa352e17b --- /dev/null +++ b/src/gallia/analyzer/reporter.py @@ -0,0 +1,360 @@ +""" +gallia-analyze Reporter module +""" +import os +import numpy as np +import pandas as pd +from pandas.core.indexing import IndexingError +from gallia.analyzer.operator import Operator +from gallia.analyzer.xl_generator import ExcelGenerator +from gallia.analyzer.config import TblStruct, NUM_ECU_MODES +from gallia.analyzer.mode_config import LogMode, ScanMode +from gallia.analyzer.name_config import ColNm, TblNm +from gallia.analyzer.exceptions import ColumnMismatchException, EmptyTableException + +if __name__ == "__main__": + exit() + + +class Reporter(Operator): + """ + Reporter class for generating EXCEL report and visualizing data with graphs and data frames. + """ + + def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): + Operator.__init__(self, path, log_mode) + self.msg_head = "[Reporter] " + self.out_path = f"./reports_{os.path.basename(path)}/" + self.abn_serv_vec = np.array([]) + self.abn_iden_vec = np.array([]) + self.xl_ext = ".xlsx" + + def iterate_all(self, out_path: str, show_psb: bool = False) -> bool: + """ + consolidate all scan_identifier runs for all services by identifier respectively into EXCEL files. + """ + for serv in self.iso_serv_by_iden_vec: + if not self.consolidate_xl_iden(serv, out_path, show_psb): + continue + return True + + def consolidate_xl_serv(self, out_path: str, show_psb: bool = False) -> bool: + """ + consolidate all scan_service runs sorted by ECU mode into one EXCEL file. + """ + if not self.load_meta(force=True): + return False + self.load_ven_sess() + self.load_ven_lu() + self.log(f"consolidating scan_service by ECU mode from {self.db_path} ...") + self.set_path_prefix(out_path) + xl_generator = ExcelGenerator(self.db_path, self.log_mode) + xl_is_empty = True + for ecu_mode in np.arange(self.num_modes): + try: + sql = f""" + SELECT * FROM "{TblNm.serv}" + WHERE "{ColNm.ecu_mode}" = {str(ecu_mode)}; + """ + raw_df = self.get_df_by_query(sql, False) + self.check_df(raw_df, TblStruct.serv) + if not self.load_sid_oi_from_df(raw_df, ecu_mode): + continue + entries_vec = self.get_entries_oi(ScanMode.SERV, show_psb) + xl_generator.write_xl(entries_vec, raw_df, ScanMode.SERV, ecu_mode) + xl_is_empty = False + except ( + IndexError, + ColumnMismatchException, + AttributeError, + ) as exc: + self.log("consolidating scan_service failed", True, exc) + continue + except EmptyTableException: + self.log(f"nothing to report for ECU mode {ecu_mode}.") + if xl_is_empty: + return False + self.set_path_prefix(out_path) + out_path = self.get_path( + "all_services_by_ecu_mode", self.xl_ext, rm_if_exists=True + ) + if not xl_generator.save_close_xl(out_path): + return False + return True + + def consolidate_xl_iden( + self, serv: int, out_path: str, show_psb: bool = False + ) -> bool: + """ + consolidate all scan_identifier runs sorted by ECU mode + for a certain given service into one EXCEL file. 
+ """ + if not serv in self.iso_serv_by_iden_vec: + self.log("given Service ID is not service by identifier.") + return False + if not self.load_meta(force=True): + return False + self.load_ven_sess() + self.load_ven_lu() + self.log( + f"consolidating for Service ID 0x{serv:02X} {self.iso_serv_code_dict[serv]} from {self.db_path} ..." + ) + self.set_path_prefix(out_path) + xl_generator = ExcelGenerator(self.db_path, self.log_mode) + xl_is_empty = True + if self.num_modes == 0: + num_modes = NUM_ECU_MODES + self.log( + f"no information about ECU modes. trying {NUM_ECU_MODES} mode(s)..." + ) + else: + num_modes = self.num_modes + for ecu_mode in np.arange(num_modes): + try: + sql = f""" + SELECT * FROM "{TblNm.iden}" + WHERE "{ColNm.ecu_mode}" = {str(ecu_mode)} + AND "{ColNm.serv}" = {str(serv)}; + """ + raw_df = self.get_df_by_query(sql, False) + self.check_df(raw_df, TblStruct.iden) + self.load_iden_oi_from_df(raw_df, ecu_mode) + entries_vec = self.get_entries_oi(ScanMode.IDEN, show_psb) + if xl_generator.write_xl(entries_vec, raw_df, ScanMode.IDEN, ecu_mode): + xl_is_empty = False + except ( + IndexError, + ColumnMismatchException, + AttributeError, + ) as exc: + self.log("consolidating scan_identifier failed", True, exc) + continue + except EmptyTableException: + self.log(f"nothing to report for ECU mode {ecu_mode}.") + if xl_is_empty: + self.log(f"nothing to report for Service ID 0x{serv:02X}") + return False + self.set_path_prefix(out_path) + out_path = self.get_path( + f"0x{serv:02X}_{self.iso_serv_code_dict[serv]}", + self.xl_ext, + rm_if_exists=True, + ) + if not xl_generator.save_close_xl(out_path): + return False + return True + + def report_xl( + self, + runs_vec: np.ndarray, + show_psb: bool = False, + out_path: str = "", + ) -> bool: + """ + generate EXCEL report for given input runs. + """ + if not self.load_meta(force=True): + return False + self.load_ven_sess() + self.load_ven_lu() + self.set_path_prefix(out_path) + for run in runs_vec: + self.report_xl_each_run(run, show_psb) + return True + + def report_xl_each_run(self, run: int, show_psb: bool = False) -> bool: + """ + generate EXCEL report for a certain run. + """ + self.log(f"reporting run #{str(run)} from {self.db_path} ...") + scan_mode = self.get_scan_mode(run) + if scan_mode == ScanMode.SERV: + return self.report_xl_serv(run, show_psb) + if scan_mode == ScanMode.IDEN: + return self.report_xl_iden(run, show_psb) + return False + + def report_xl_serv(self, run: int, show_psb: bool = False) -> bool: + """ + generate EXCEL report for a certain run of scan_service. + """ + try: + raw_df = self.read_run_db(TblNm.serv, run) + self.check_df(raw_df, TblStruct.serv) + self.load_sid_oi_from_df(raw_df) + entries_vec = self.get_entries_oi(ScanMode.SERV, show_psb) + xl_generator = ExcelGenerator(self.db_path, self.log_mode) + if not xl_generator.write_xl(entries_vec, raw_df, ScanMode.SERV): + return False + out_path = self.get_path( + f"serv_run{run:02}", self.xl_ext, rm_if_exists=True + ) + if not xl_generator.save_close_xl(out_path): + return False + except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: + self.log("reporting scan_service failed", True, exc) + return False + return True + + def report_xl_iden(self, run: int, show_psb: bool = False) -> bool: + """ + generate EXCEL report for a certain run of scan_identifier. 
+ """ + try: + raw_df = self.read_run_db(TblNm.iden, run) + self.check_df(raw_df, TblStruct.iden) + self.load_iden_oi_from_df(raw_df) + entries_vec = self.get_entries_oi(ScanMode.IDEN, show_psb) + xl_generator = ExcelGenerator(self.db_path, self.log_mode) + if not xl_generator.write_xl(entries_vec, raw_df, ScanMode.IDEN): + return False + out_path = self.get_path( + f"iden_run{run:02}", self.xl_ext, rm_if_exists=True + ) + if not xl_generator.save_close_xl(out_path): + return False + except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: + self.log("reporting scan_identifier failed", True, exc) + return False + return True + + def set_path_prefix(self, path: str = "") -> None: + """ + set path prefix for EXCEL report file to save. + """ + if path == "": + self.out_path = os.path.expanduser( + f"./reports_{os.path.basename(self.db_path)}/" + ) + elif path == ".": + self.out_path = os.path.expanduser("./") + elif path == "..": + self.out_path = os.path.expanduser("../") + elif path[-1] == "/": + self.out_path = os.path.expanduser(path) + else: + self.out_path = os.path.expanduser(path + "/") + + def get_path( + self, suffix: int = "", ext: str = ".xlsx", rm_if_exists: bool = False + ) -> str: + """ + get path for EXCEL report file by combining path prefix, + run number and EXCEL extention. + """ + try: + dir_name = os.path.dirname(self.out_path) + file_name = os.path.basename(self.out_path) + if dir_name == "": + dir_name = "./" + out_path = os.path.join(dir_name, file_name + str(suffix) + ext) + if os.path.isdir(dir_name): + if os.path.isfile(out_path) and rm_if_exists: + os.remove(out_path) + self.log(f"existing file removed from {out_path}") + else: + os.mkdir(dir_name) + self.log(f"directory created at {dir_name}") + except (OSError) as exc: + self.log("getting path failed", True, exc) + return f"./reports/run{str(suffix)}{ext}" + return out_path + + def get_entries_oi(self, scan_mode: ScanMode, show_psb: bool = False) -> np.ndarray: + """ + get services or identifieres of interest to display in summary sheet. + """ + if show_psb: + if scan_mode == ScanMode.SERV: + return np.arange(256) + if scan_mode == ScanMode.IDEN: + return np.arange(65536) + else: + if scan_mode == ScanMode.SERV: + return self.abn_serv_vec + if scan_mode == ScanMode.IDEN: + return self.abn_iden_vec + return np.array([]) + + def load_sid_oi(self, run: int, ecu_mode: int = -1) -> bool: + """ + load services of interest for a given input run. + """ + try: + raw_df = self.read_run_db(TblNm.serv, run) + self.check_df(raw_df, TblStruct.serv) + if not self.load_sid_oi_from_df(raw_df, ecu_mode): + return False + except (EmptyTableException, ColumnMismatchException) as exc: + self.log("loading services of interest failed", True, exc) + return False + return True + + def load_sid_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool: + """ + load services of interest from input raw data frame. 
+ """ + try: + dft_err_df = self.get_dft_err_df_from_raw(raw_df) + dft_err_ser: pd.Series = dft_err_df.loc[ColNm.dft] + cond_abn = pd.Series([False]).repeat(raw_df.shape[0]).reset_index(drop=True) + sess_vec = np.array(dft_err_df.columns) + raw_df[ColNm.combi] = list(zip(raw_df[ColNm.sess], raw_df[ColNm.resp])) + for sess in sess_vec: + cond_abn |= raw_df[ColNm.combi].apply( + lambda x, s=sess: (x[0] == s) + and (x[1] != dft_err_ser[s]) + and (x[1] != -1) + and (x[1] != 0) + ) + if ecu_mode != -1: + cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode + self.abn_serv_vec = np.sort(pd.unique(raw_df.loc[cond_abn, ColNm.serv])) + except (KeyError, IndexingError, AttributeError) as exc: + self.log("loading services of interest from data frame failed", True, exc) + return False + return True + + def load_iden_oi(self, run: int, ecu_mode: int = -1) -> bool: + """ + load identifiers of interest for a given input run. + """ + try: + raw_df = self.read_run_db(TblNm.iden, run) + self.check_df(raw_df, TblStruct.iden) + if not self.load_iden_oi_from_df(raw_df, ecu_mode): + return False + except (EmptyTableException, ColumnMismatchException) as exc: + self.log("loading identifiers of interest failed", True, exc) + return False + return True + + def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool: + """ + load identifiers of interest from input raw data frame. + """ + try: + serv_vec = np.sort(pd.unique(raw_df[ColNm.serv])) + if not serv_vec.size == 1: + self.log("more than one service in a run", True) + return False + dft_err_df = self.get_dft_err_df_from_raw(raw_df) + dft_err_ser: pd.Series = dft_err_df.loc[ColNm.dft] + cond_abn = pd.Series([False]).repeat(raw_df.shape[0]).reset_index(drop=True) + sess_vec = np.array(dft_err_df.columns) + raw_df[ColNm.combi] = list(zip(raw_df[ColNm.sess], raw_df[ColNm.resp])) + for sess in sess_vec: + cond_abn |= raw_df[ColNm.combi].apply( + lambda x, s=sess: (x[0] == s) + and (x[1] != dft_err_ser[s]) + and (x[1] != -1) + ) + if ecu_mode != -1: + cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode + self.abn_iden_vec = np.sort(pd.unique(raw_df.loc[cond_abn, ColNm.iden])) + except (KeyError, IndexingError, AttributeError) as exc: + self.log( + "loading identifiers of interest from data frame failed", True, exc + ) + return False + return True diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py new file mode 100644 index 000000000..0388f5bfc --- /dev/null +++ b/src/gallia/analyzer/time_analyzer.py @@ -0,0 +1,240 @@ +""" +gallia-analyze Time Analyzer module +""" +import json +import numpy as np +import pandas as pd +from pandas.core.indexing import IndexingError +import matplotlib.pyplot as plt +from gallia.analyzer.exceptions import ColumnMismatchException, EmptyTableException +from gallia.analyzer.reporter import Reporter +from gallia.analyzer.config import PltDesign, TblStruct, SrcPath, DFT_T_PREC +from gallia.analyzer.mode_config import ScanMode, LogMode +from gallia.analyzer.name_config import ColNm, TblNm, KyNm + +if __name__ == "__main__": + exit() + + +class TimeAnalyzer(Reporter): + """ + Time Analyzer class for reaction time analysis. 
+ """ + + def __init__( + self, + path: str = "", + t_prec: int = DFT_T_PREC, + log_mode: LogMode = LogMode.STD_OUT, + ): + Reporter.__init__(self, path, log_mode) + self.msg_head = "[TimeAnalyzer] " + self.t_prec = t_prec + self.jpg_ext = ".jpg" + self.csv_ext = ".csv" + + def extract_tra(self, runs_vec: np.ndarray) -> bool: + """ + extract reaction times of scan_service or scan_identifier in database. + """ + for run in runs_vec: + self.extract_tra_each_run(run) + return True + + def extract_tra_each_run(self, run: int) -> bool: + """ + extract reaction times of each run. + """ + self.log(f"extracting time for run #{str(run)} from {self.db_path} ...") + scan_mode = self.get_scan_mode(run) + if scan_mode == ScanMode.SERV: + tbl_nm = TblNm.serv + tbl_struct = TblStruct.serv + if scan_mode == ScanMode.IDEN: + tbl_nm = TblNm.iden + tbl_struct = TblStruct.iden + try: + raw_df = self.read_run_db(tbl_nm, run) + self.check_df(raw_df, tbl_struct) + raw_df[ColNm.t_rqst] = ( + raw_df[ColNm.t_rqst] + .astype(str) + .apply(self.del_p) + .apply(self.adj_len) + .astype("int64") + ) + raw_df[ColNm.t_resp] = ( + raw_df[ColNm.t_resp] + .fillna(0) + .astype(str) + .apply(self.del_p) + .apply(self.adj_len) + .astype("int64") + ) + raw_df.loc[raw_df[ColNm.t_resp] == 0, ColNm.t_resp] = raw_df.loc[ + raw_df[ColNm.t_resp] == 0, ColNm.t_rqst + ] + raw_df[ColNm.t_react] = raw_df[ColNm.t_resp] - raw_df[ColNm.t_rqst] + raw_df.to_csv( + self.get_path(f"time_run{run:02}", self.csv_ext, rm_if_exists=True) + ) + except ( + KeyError, + IndexingError, + AttributeError, + EmptyTableException, + ColumnMismatchException, + ) as exc: + self.log(f"extracting reaction time for run #{run} failed", True, exc) + return False + return True + + def plot_tra(self, runs_vec: np.ndarray) -> bool: + """ + plot service ID or identifier and reaction time in scatter plot. + """ + for run in runs_vec: + self.plot_tra_each_run(run) + return True + + def plot_tra_each_run(self, run: int) -> bool: + """ + plot reaction time for each run. + """ + self.log(f"plotting reaction time for run #{str(run)} from {self.db_path} ...") + scan_mode = self.get_scan_mode(run) + if scan_mode == ScanMode.SERV: + self.plot_tra_serv(run) + if scan_mode == ScanMode.IDEN: + self.plot_tra_iden(run) + return True + + def plot_tra_serv(self, run: int) -> bool: + """ + plot service ID and reaction time in scatter for a given run. + """ + try: + raw_df = pd.read_csv(self.get_path(f"time_run{run:02}", self.csv_ext)) + plt.rcParams["figure.figsize"] = [30, 25] + with open(SrcPath.err_src, encoding="utf8") as resp_json: + resp_ls = json.load(resp_json) + c_tbl_dict = {} + for resp in resp_ls: + c_tbl_dict.update({resp[KyNm.resp]: f"#{resp[KyNm.rgb]}"}) + plt.style.use(PltDesign.plot_style) + plt.scatter( + x=raw_df[ColNm.serv], + y=raw_df[ColNm.t_react], + s=10, + c=raw_df[ColNm.resp].map(c_tbl_dict), + cmap="viridis", + ) + plt.xlabel("Service ID") + plt.ylabel("Reaction Time (nsec)") + plt.savefig( + self.get_path( + f"serv_tra_plot_p{self.t_prec}_run{run:02}", + self.jpg_ext, + rm_if_exists=True, + ) + ) + plt.clf() + plt.cla() + plt.close() + except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: + self.log( + f"plotting service ID and reaction time in run #{run} failed", True, exc + ) + return False + return True + + def plot_tra_iden(self, run: int) -> bool: + """ + plot identifier and reaction time in scatter for a given run. 
+ """ + try: + raw_df = pd.read_csv(self.get_path(f"time_run{run:02}", self.csv_ext)) + plt.rcParams["figure.figsize"] = [30, 25] + with open(SrcPath.err_src, encoding="utf8") as resp_json: + resp_ls = json.load(resp_json) + c_tbl_dict = {} + for resp in resp_ls: + c_tbl_dict.update({resp[KyNm.resp]: f"#{resp[KyNm.rgb]}"}) + plt.style.use(PltDesign.plot_style) + plt.scatter( + x=raw_df[ColNm.iden], + y=raw_df[ColNm.t_react], + s=10, + c=raw_df[ColNm.resp].map(c_tbl_dict), + cmap="viridis", + ) + plt.xlabel("Identifier") + plt.ylabel("Reaction Time (nsec)") + plt.savefig( + self.get_path( + f"iden_tra_plot_p{self.t_prec}_run{run:02}", + self.jpg_ext, + rm_if_exists=True, + ) + ) + plt.clf() + plt.cla() + plt.close() + except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: + self.log( + f"plotting identifier and reaction time in run #{run} failed", True, exc + ) + return False + return True + + def hist_tra(self, runs_vec: np.ndarray) -> bool: + """ + create a histogram of reaction time. + """ + for run in runs_vec: + self.hist_tra_each_run(run) + return True + + def hist_tra_each_run(self, run: int) -> bool: + """ + create a histogram of reaction time for a given run. + """ + self.log(f"creating a histogram for run #{str(run)} from {self.db_path} ...") + try: + raw_df = pd.read_csv(self.get_path(f"time_run{run:02}", self.csv_ext)) + plt.style.use(PltDesign.hist_style) + plt.hist(raw_df[ColNm.t_react], bins=500) + plt.savefig( + self.get_path( + f"tra_hist_p{self.t_prec}_run{run:02}", + self.jpg_ext, + rm_if_exists=True, + ) + ) + plt.clf() + plt.cla() + plt.close() + except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: + self.log( + f"establishing histogram of identifiers in run #{run} failed", True, exc + ) + return False + return True + + def adj_len(self, t_stamp: str) -> str: + """ + adjust the length of time stamp to the given time precision. + """ + diff = self.t_prec - len(t_stamp) + if diff < 0: + return t_stamp[:diff] + if diff > 0: + return t_stamp + ("0" * diff) + else: + return t_stamp + + def del_p(self, t_stamp: str) -> str: + """ + delete period in string. 
+ """ + return t_stamp.replace(".", "") diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py new file mode 100644 index 000000000..872cbdd27 --- /dev/null +++ b/src/gallia/analyzer/xl_generator.py @@ -0,0 +1,544 @@ +""" +gallia-analyze EXCEL Generator module +""" + +import json +from json.decoder import JSONDecodeError +from typing import Any, Dict, Tuple +import openpyxl as op +from openpyxl.utils import get_column_letter +from openpyxl.styles import PatternFill, Font, Alignment +from openpyxl.utils.exceptions import ( + InvalidFileException, + SheetTitleException, + WorkbookAlreadySaved, + ReadOnlyWorkbookException, +) +import numpy as np +import pandas as pd +from pandas.core.indexing import IndexingError +from gallia.analyzer.operator import Operator +from gallia.analyzer.config import SrcPath, XlDesign, FAIL_CLS_CAP +from gallia.analyzer.failure import Failure +from gallia.analyzer.mode_config import LogMode, ScanMode +from gallia.analyzer.name_config import ColNm, ShtNm, CellCnt, KyNm + +if __name__ == "__main__": + exit() + + +class ExcelGenerator(Operator): + + start_row: int = 1 + start_col: int = 1 + + def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): + Operator.__init__(self, path, log_mode) + self.msg_head = "[ExcelGenerator] " + self.workbook = self.workbook = op.Workbook() + self.worksheet: Any + self.load_color_code(SrcPath.err_src) + + def write_xl( + self, + entries_vec: np.ndarray, + raw_df: pd.DataFrame, + scan_mode: ScanMode, + ecu_mode: int = -1, + ) -> bool: + self.load_ven_sess() + try: + if ecu_mode == -1: + sum_sheet_name = "Summary" + fail_sheet_suffix = "" + else: + sum_sheet_name = f"Summary({str(ecu_mode)})" + fail_sheet_suffix = f"({str(ecu_mode)})" + if ShtNm.init in self.workbook.sheetnames: + self.workbook.remove(self.workbook[ShtNm.init]) + if scan_mode == ScanMode.SERV: + if not self.add_sum_sheet_serv(raw_df, entries_vec, sum_sheet_name): + return False + if not self.add_failure_sheet(raw_df, ScanMode.SERV, fail_sheet_suffix): + return False + if scan_mode == ScanMode.IDEN: + if not self.add_sum_sheet_iden(raw_df, entries_vec, sum_sheet_name): + return False + if not self.add_failure_sheet(raw_df, ScanMode.IDEN, fail_sheet_suffix): + return False + if len(self.workbook.worksheets) == 0: + self.workbook.create_sheet(ShtNm.init) + except (SheetTitleException, ReadOnlyWorkbookException) as exc: + self.log("generating EXCEL failed", True, exc) + return False + return True + + def save_close_xl(self, out_path: str) -> bool: + try: + self.workbook.save(out_path) + self.workbook.close() + except (InvalidFileException, WorkbookAlreadySaved) as exc: + self.log("saving EXCEL failed", True, exc) + return False + return + + def add_sum_sheet_serv( + self, raw_df: pd.DataFrame, entries_vec: np.ndarray, sheet_name: str = "" + ) -> bool: + """ + add summary sheet for scan_service to report EXCEL file. 
+ """ + if sheet_name == "": + sheet_name = ShtNm.sum + try: + self.worksheet = self.workbook.create_sheet(sheet_name) + ref_col = ColNm.serv + dft_err_df = self.get_dft_err_df_from_raw(raw_df) + cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.SERV) + cur_row, cur_col = self.sum_sheet_fill_index( + cur_row, cur_col, entries_vec, ScanMode.SERV + ) + cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df) + cur_row, cur_col = self.sum_sheet_fill_resp( + cur_row, + cur_col, + dft_err_df, + raw_df, + ref_col, + entries_vec, + ScanMode.SERV, + ) + except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: + self.log("adding summary sheet failed", True, exc) + return False + return True + + def add_sum_sheet_iden( + self, raw_df: pd.DataFrame, entries_vec: np.ndarray, sheet_name: str = "" + ) -> bool: + """ + add summary sheet for scan_identifier to report EXCEL file. + """ + if sheet_name == "": + sheet_name = ShtNm.sum + try: + self.worksheet = self.workbook.create_sheet(sheet_name) + ref_col = ColNm.iden + serv = pd.unique(raw_df[ColNm.serv])[0] + sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + dft_err_df = self.get_dft_err_df_from_raw(raw_df) + cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.IDEN, serv, sbfn_vec) + cur_row, cur_col = self.sum_sheet_fill_index( + cur_row, cur_col, entries_vec, ScanMode.IDEN, sbfn_vec + ) + cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df) + cur_row, cur_col = self.sum_sheet_fill_resp( + cur_row, + cur_col, + dft_err_df, + raw_df, + ref_col, + entries_vec, + ScanMode.IDEN, + ) + except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: + self.log("adding summary sheet failed", True, exc) + return False + return True + + def sum_sheet_fill_origin( + self, + scan_mode: ScanMode, + serv: int = 0, + sbfn_vec: np.ndarray = np.array([]), + ) -> Tuple[int, int]: + """ + fill origin cell in summary sheet. + """ + try: + cur_row = self.start_row + cur_col = self.start_col + if scan_mode == ScanMode.SERV: + header = CellCnt.serv + if scan_mode == ScanMode.IDEN: + header = self.get_code_text(serv, self.iso_serv_code_dict) + self.worksheet.cell(cur_row, cur_col).value = header + self.worksheet.cell(cur_row, cur_col).font = Font(name=XlDesign.font_index) + self.set_cell_width(cur_col, XlDesign.dim_mid_wide) + cur_row += 1 + self.worksheet.cell(cur_row, cur_col).value = CellCnt.default + self.worksheet.cell(cur_row, cur_col).font = Font(name=XlDesign.font_index) + cur_row += 1 + if sbfn_vec.size > 1: + self.worksheet.cell( + self.start_row, self.start_col + 1 + ).value = CellCnt.sbfn + self.worksheet.freeze_panes = self.worksheet.cell( + self.start_row + 1, self.start_col + 2 + ).coordinate + else: + self.worksheet.freeze_panes = self.worksheet.cell( + self.start_row + 1, self.start_col + 1 + ).coordinate + except (KeyError, AttributeError) as exc: + self.log("filling origin cell of summary sheet failed", True, exc) + return self.start_row, self.start_col + return cur_row, cur_col + + def sum_sheet_fill_index( + self, + cur_row: int, + cur_col: int, + entries_vec: np.ndarray, + scan_mode: ScanMode, + sbfn_vec: np.ndarray = np.array([]), + ) -> Tuple[int, int]: + """ + fill index column in summary sheet. 
+ """ + try: + for entry in entries_vec: + if scan_mode == ScanMode.SERV: + self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( + entry, self.iso_serv_code_dict + ) + cur_row += 1 + if scan_mode == ScanMode.IDEN: + if entry == -1: + index_name = CellCnt.no_ent + else: + index_name = f"0x{int(entry):04X}" + for sbfn in sbfn_vec: + if sbfn_vec.size > 1: + self.worksheet.cell( + cur_row, self.start_col + ).value = index_name + self.worksheet.cell( + cur_row, self.start_col + 1 + ).value = sbfn + fill_color = self.get_gray_color(sbfn) + self.worksheet.cell( + cur_row, self.start_col + 1 + ).fill = PatternFill( + start_color=fill_color, + end_color=fill_color, + fill_type="solid", + ) + self.worksheet.cell( + cur_row, self.start_col + 1 + ).alignment = Alignment(horizontal="center") + cur_row += 1 + else: + self.worksheet.cell(cur_row, cur_col).value = index_name + cur_row += 1 + self.worksheet.cell(cur_row, cur_col).font = Font( + name=XlDesign.font_index + ) + if sbfn_vec.size > 1: + cur_col += 2 + else: + cur_col += 1 + cur_row = self.start_row + except (KeyError, AttributeError) as exc: + self.log("filling index of summary sheet failed", True, exc) + return self.start_row, self.start_col + 1 + return cur_row, cur_col + + def sum_sheet_fill_sess( + self, cur_row: int, cur_col: int, dft_err_df: pd.DataFrame + ) -> Tuple[int, int]: + """ + fill top session row in summary sheet. + """ + try: + sess_vec = np.array(dft_err_df.columns) + sess_num = 0 + for sess in sess_vec: + dft_err = dft_err_df[sess][0] + if dft_err == -1: + continue + self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( + sess, self.sess_code_dict + ) + self.worksheet.cell(cur_row, cur_col).font = Font( + name=XlDesign.font_index + ) + self.set_cell_width(cur_col, XlDesign.dim_wide) + cur_row += 1 + self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( + dft_err, self.iso_err_code_dict + ) + self.worksheet.cell(cur_row, cur_col).font = Font( + name=XlDesign.font_value + ) + self.fill_cell(cur_row, cur_col, dft_err) + cur_row -= 1 + cur_col += 1 + sess_num += 1 + cur_col -= sess_num + cur_row = self.start_row + 2 + except (KeyError, IndexingError, AttributeError) as exc: + self.log("filling top session row of summary sheet failed", True, exc) + return self.start_row + 1, self.start_col + 1 + return cur_row, cur_col + + def sum_sheet_fill_resp( + self, + cur_row: int, + cur_col: int, + dft_err_df: pd.DataFrame, + raw_df: pd.DataFrame, + ref_col: str, + entries_vec: np.ndarray, + scan_mode: ScanMode, + ) -> Tuple[int, int]: + """ + fill response field in summary sheet. 
+ """ + try: + sess_vec = np.array(dft_err_df.columns) + if scan_mode == ScanMode.SERV: + sbfn_vec = np.arange(1) + if scan_mode == ScanMode.IDEN: + sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + for sess in sess_vec: + if dft_err_df[sess][0] == -1: + continue + for entry in entries_vec: + for sbfn in sbfn_vec: + if scan_mode == ScanMode.SERV: + cond = (raw_df[ColNm.sess] == sess) & ( + raw_df[ref_col] == entry + ) + if scan_mode == ScanMode.IDEN: + cond = ( + (raw_df[ColNm.sess] == sess) + & (raw_df[ref_col] == entry) + & (raw_df[ColNm.sbfn] == sbfn) + ) + err_ser = raw_df.loc[cond, ColNm.resp].mode() + resp = self.get_code_text( + err_ser.iloc[-1], self.iso_err_code_dict + ) + if err_ser.size == 1: + if err_ser[0] == 0: + search_id = int(raw_df[cond][ColNm.id].to_numpy()[0]) + resp = ( + str(self.get_pos_res(search_id)) + "\n" + str(resp) + ) + self.fill_cell(cur_row, cur_col, err_ser.iloc[-1]) + self.worksheet.cell(cur_row, cur_col).value = resp + self.worksheet.cell(cur_row, cur_col).font = Font( + name=XlDesign.font_value + ) + cur_row += 1 + cur_col += 1 + cur_row = self.start_row + 2 + except (KeyError, IndexingError, AttributeError) as exc: + self.log("filling response field of summary sheet failed", True, exc) + return self.start_row + 1, self.start_col + 1 + return cur_row, cur_col + + def add_failure_sheet( + self, raw_df: pd.DataFrame, scan_mode: ScanMode, sheet_name_suffix: str = "" + ) -> bool: + """ + add failure(undocumented or missing) sheet to report EXCEL file. + """ + if scan_mode == ScanMode.UNKNOWN: + self.log("adding summary sheet failed: scan mode unknown.") + return False + try: + dft_err_df = self.get_dft_err_df_from_raw(raw_df) + sess_vec = np.array(dft_err_df.columns) + if scan_mode == ScanMode.SERV: + fail_vec = np.array([Failure.UNDOC_SERV, Failure.MISS_SERV]) + width = XlDesign.dim_wide + if scan_mode == ScanMode.IDEN: + fail_vec = np.array([Failure.UNDOC_IDEN, Failure.MISS_IDEN]) + sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + width = XlDesign.dim_middle + cur_row = self.start_row + cur_col = self.start_col + sess_lu_vec = self.get_sess_lu() + for fail in fail_vec: + if (fail == Failure.UNDOC_SERV) or (fail == Failure.UNDOC_IDEN): + sheet_name = ShtNm.undoc + if (fail == Failure.MISS_SERV) or (fail == Failure.MISS_IDEN): + sheet_name = ShtNm.miss + self.worksheet = self.workbook.create_sheet( + f"{sheet_name}{sheet_name_suffix}" + ) + self.worksheet.freeze_panes = self.worksheet.cell( + self.start_row + 1, self.start_col + ).coordinate + for sess in sess_vec: + self.set_cell_width(cur_col, width) + self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( + sess, self.sess_code_dict + ) + if dft_err_df[sess][0] == -1: + self.worksheet.cell(cur_row, cur_col).value = ( + str(self.worksheet.cell(cur_row, cur_col).value) + + "\n" + + CellCnt.sess_unscn + ) + if sess_lu_vec.size > 0: + if not sess in sess_lu_vec: + self.worksheet.cell(cur_row, cur_col).value = ( + str(self.worksheet.cell(cur_row, cur_col).value) + + "\n" + + CellCnt.sess_undoc + ) + self.set_cell_height(cur_row, XlDesign.dim_mid_wide) + self.worksheet.cell(cur_row, cur_col).alignment = Alignment( + horizontal="general", vertical="top" + ) + self.worksheet.cell(cur_row, cur_col).font = Font( + name=XlDesign.font_index + ) + cur_row += 1 + cond = raw_df[ColNm.fail].apply( + lambda x, fl=fail: self.check_fail(x, fl) + ) & (raw_df[ColNm.sess] == sess) + if scan_mode == ScanMode.SERV: + serv_vec = np.sort(pd.unique(raw_df.loc[cond, ColNm.serv])) + for serv in serv_vec: + 
+    def add_failure_sheet(
+        self, raw_df: pd.DataFrame, scan_mode: ScanMode, sheet_name_suffix: str = ""
+    ) -> bool:
+        """
+        add failure(undocumented or missing) sheet to report EXCEL file.
+        """
+        if scan_mode == ScanMode.UNKNOWN:
+            self.log("adding failure sheet failed: scan mode unknown.")
+            return False
+        try:
+            dft_err_df = self.get_dft_err_df_from_raw(raw_df)
+            sess_vec = np.array(dft_err_df.columns)
+            if scan_mode == ScanMode.SERV:
+                fail_vec = np.array([Failure.UNDOC_SERV, Failure.MISS_SERV])
+                width = XlDesign.dim_wide
+            if scan_mode == ScanMode.IDEN:
+                fail_vec = np.array([Failure.UNDOC_IDEN, Failure.MISS_IDEN])
+                sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn]))
+                width = XlDesign.dim_middle
+            cur_row = self.start_row
+            cur_col = self.start_col
+            sess_lu_vec = self.get_sess_lu()
+            for fail in fail_vec:
+                if (fail == Failure.UNDOC_SERV) or (fail == Failure.UNDOC_IDEN):
+                    sheet_name = ShtNm.undoc
+                if (fail == Failure.MISS_SERV) or (fail == Failure.MISS_IDEN):
+                    sheet_name = ShtNm.miss
+                self.worksheet = self.workbook.create_sheet(
+                    f"{sheet_name}{sheet_name_suffix}"
+                )
+                self.worksheet.freeze_panes = self.worksheet.cell(
+                    self.start_row + 1, self.start_col
+                ).coordinate
+                for sess in sess_vec:
+                    self.set_cell_width(cur_col, width)
+                    self.worksheet.cell(cur_row, cur_col).value = self.get_code_text(
+                        sess, self.sess_code_dict
+                    )
+                    if dft_err_df[sess][0] == -1:
+                        self.worksheet.cell(cur_row, cur_col).value = (
+                            str(self.worksheet.cell(cur_row, cur_col).value)
+                            + "\n"
+                            + CellCnt.sess_unscn
+                        )
+                    if sess_lu_vec.size > 0:
+                        if not sess in sess_lu_vec:
+                            self.worksheet.cell(cur_row, cur_col).value = (
+                                str(self.worksheet.cell(cur_row, cur_col).value)
+                                + "\n"
+                                + CellCnt.sess_undoc
+                            )
+                    self.set_cell_height(cur_row, XlDesign.dim_mid_wide)
+                    self.worksheet.cell(cur_row, cur_col).alignment = Alignment(
+                        horizontal="general", vertical="top"
+                    )
+                    self.worksheet.cell(cur_row, cur_col).font = Font(
+                        name=XlDesign.font_index
+                    )
+                    cur_row += 1
+                    cond = raw_df[ColNm.fail].apply(
+                        lambda x, fl=fail: self.check_fail(x, fl)
+                    ) & (raw_df[ColNm.sess] == sess)
+                    if scan_mode == ScanMode.SERV:
+                        serv_vec = np.sort(pd.unique(raw_df.loc[cond, ColNm.serv]))
+                        for serv in serv_vec:
+                            self.worksheet.cell(
+                                cur_row, cur_col
+                            ).value = self.get_code_text(serv, self.iso_serv_code_dict)
+                            self.worksheet.cell(cur_row, cur_col).font = Font(
+                                name=XlDesign.font_value
+                            )
+                            cur_row += 1
+                        cur_col += 1
+                    if scan_mode == ScanMode.IDEN:
+                        if sbfn_vec.size > 1:
+                            raw_df[ColNm.combi] = list(
+                                zip(raw_df[ColNm.iden], raw_df[ColNm.sbfn])
+                            )
+                            iden_sbfn_vec = np.sort(
+                                pd.unique(raw_df.loc[cond, ColNm.combi])
+                            )
+                            for iden_sbfn in iden_sbfn_vec:
+                                iden = iden_sbfn[0]
+                                sbfn = iden_sbfn[1]
+                                if iden == -1:
+                                    entry = CellCnt.no_ent
+                                else:
+                                    entry = f"0x{iden:04X} subfunc:{sbfn:02}"
+                                self.worksheet.cell(cur_row, cur_col).value = entry
+                                self.worksheet.cell(cur_row, cur_col).font = Font(
+                                    name=XlDesign.font_value
+                                )
+                                cur_row += 1
+                            cur_col += 1
+                        else:
+                            iden_vec = np.sort(pd.unique(raw_df.loc[cond, ColNm.iden]))
+                            for iden in iden_vec:
+                                if iden == -1:
+                                    entry = CellCnt.no_ent
+                                else:
+                                    entry = f"0x{iden:04X}"
+                                self.worksheet.cell(cur_row, cur_col).value = entry
+                                self.worksheet.cell(cur_row, cur_col).font = Font(
+                                    name=XlDesign.font_value
+                                )
+                                cur_row += 1
+                            cur_col += 1
+                cur_row = self.start_row
+                cur_col = self.start_col
+        except (KeyError, IndexingError, AttributeError, SheetTitleException) as exc:
+            self.log("adding failure summary sheets failed", True, exc)
+            return False
+        return True
+
+    def load_color_code(self, path: str) -> bool:
+        """
+        load color codes from JSON file
+        """
+        try:
+            with open(path, encoding="utf8") as src_json:
+                color_code_ls = json.load(src_json)
+            self.color_code_dict = {
+                color_code[KyNm.err]: color_code[KyNm.rgb]
+                for color_code in color_code_ls
+            }
+        except (FileNotFoundError, KeyError, JSONDecodeError) as exc:
+            self.log("loading color codes failed", True, exc)
+            return False
+        return True
+
+    def set_cell_width(self, col: int, width: int) -> bool:
+        """
+        set the cell width of given input column in the current EXCEL worksheet.
+        """
+        try:
+            self.worksheet.column_dimensions[get_column_letter(col)].width = width
+        except (KeyError, AttributeError) as exc:
+            self.log("setting cell width failed", True, exc)
+            return False
+        return True
+
+    def set_cell_height(self, row: int, height: int) -> bool:
+        """
+        set the cell height of given input row in the current EXCEL worksheet.
+        """
+        try:
+            self.worksheet.row_dimensions[row].height = height
+        except (KeyError, AttributeError) as exc:
+            self.log("setting cell height failed", True, exc)
+            return False
+        return True
+
+    def fill_cell(self, row: int, col: int, error: int) -> bool:
+        """
+        fill a cell with color by given input error code.
+        """
+        try:
+            self.worksheet.cell(row, col).fill = PatternFill(
+                start_color=self.get_err_rgb(error),
+                end_color=self.get_err_rgb(error),
+                fill_type="solid",
+            )
+        except (KeyError, AttributeError) as exc:
+            self.log("filling cell failed", True, exc)
+            return False
+        return True
+
+    def check_fail(self, fail: int, fail_class: Failure) -> bool:
+        """
+        check if given failure belongs to given failure class.
+        """
+        if (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP):
+            return True
+        else:
+            return False
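
The integer division in check_fail buckets failure codes into classes of width FAIL_CLS_CAP. The constant's value lives in gallia.analyzer.config and is not shown in this series; the sketch below assumes a hypothetical width of 0x10 purely for illustration:

    FAIL_CLS_CAP = 0x10  # assumed bucket width, for illustration only

    def same_failure_class(fail: int, fail_class: int) -> bool:
        # Codes sharing a bucket of width FAIL_CLS_CAP compare as the same class.
        return (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP)

    assert same_failure_class(0x12, 0x1F)      # both in bucket 1
    assert not same_failure_class(0x12, 0x22)  # buckets 1 and 2
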
+ """ + try: + txt = ref[code] + except KeyError: + txt = "Unknown Code" + if code == -1 or code == 0: + code_txt = f"{txt}" + else: + code_txt = f"0x{int(code):02X} {txt}" + return code_txt + + def get_err_rgb(self, error: int) -> str: + """ + get RGB color code string for an error reponse. + """ + try: + return "00" + self.color_code_dict[error] + except KeyError: + return "00FFFFFF" + + def get_gray_color(self, step: int) -> str: + seed = ((step % 8) * 20) + 100 + return f"00{int(seed):02X}{int(seed):02X}{int(seed):02X}" From 2ac2acc1b0d84a4164323bb0589afedf41b15603 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 5 Jul 2022 07:45:31 +0200 Subject: [PATCH 02/26] fix --- pyproject.toml | 5 + src/gallia/analyzer/main.py | 233 ++++++++++++++-------------- src/gallia/analyzer/xl_generator.py | 54 +++---- 3 files changed, 140 insertions(+), 152 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e08e45b48..6f2e456c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,10 @@ exitcode = "^0.1.0" psutil = ">=5.9.4,<7.0.0" httpx = ">=0.26,<0.28" more-itertools = "^10.3.0" +numpy = "^1.21.4" +openpyxl = "^3.0.9" +pandas = "^1.3.4" +matplotlib = "^3.4.3" [tool.poetry.group.dev.dependencies] Sphinx = ">=5.2,<8.0" @@ -73,6 +77,7 @@ ruff = "^0.5.0" "netzteil" = "opennetzteil.cli:main" "cursed-hr" = "cursed_hr.cursed_hr:main" "hr" = "hr:main" +"analyze" = "gallia.analyzer.main:AnalyzerMain" [tool.mypy] strict = true diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index d8e6fe157..65ce6e0e4 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -2,6 +2,7 @@ """ gallia-analyze main script """ +from argparse import Namespace import sys import time import argparse @@ -15,6 +16,7 @@ from gallia.analyzer.time_analyzer import TimeAnalyzer from gallia.analyzer.mode_config import LogMode from gallia.analyzer.arg_help import ArgHelp +from gallia.udscan.core import Script # ========================================================== # # [Rule for arguments] @@ -25,134 +27,125 @@ # Parameter: one word lowercase(sometimes with dash) # ========================================================== # -parser = argparse.ArgumentParser(usage=textwrap.dedent(ArgHelp.usage)) - -# Commands -grp_cmd = parser.add_argument_group("Command") -grp_cmd.add_argument("-a", action="store_true", help=ArgHelp.analyze) -grp_cmd.add_argument("-c", action="store_true", help=ArgHelp.clear) -grp_cmd.add_argument("-e", action="store_true", help=ArgHelp.extract) -grp_cmd.add_argument("-i", action="store_true", help=ArgHelp.aio_iden) -grp_cmd.add_argument("-r", action="store_true", help=ArgHelp.report) -grp_cmd.add_argument("-s", action="store_true", help=ArgHelp.aio_serv) -grp_cmd.add_argument("-t", action="store_true", help=ArgHelp.time) - -# Options -grp_opt = parser.add_argument_group("Option") -grp_opt.add_argument("-A", action="store_true", help=ArgHelp.all_serv) -grp_opt.add_argument("-D", action="store_true", help=ArgHelp.debug) -grp_opt.add_argument("-I", action="store_true", help=ArgHelp.iso) -grp_opt.add_argument("-L", action="store_true", help=ArgHelp.log) -grp_opt.add_argument("-P", action="store_true", help=ArgHelp.possible) -grp_opt.add_argument("-C", action="store_true", help=ArgHelp.cat) - -# Parameters -grp_param = parser.add_argument_group("Parameter") -grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1) -grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0) -grp_param.add_argument("--to", type=int, 
From 2ac2acc1b0d84a4164323bb0589afedf41b15603 Mon Sep 17 00:00:00 2001
From: "Specht, Tobias"
Date: Tue, 5 Jul 2022 07:45:31 +0200
Subject: [PATCH 02/26] fix

---
 pyproject.toml                      |   5 +
 src/gallia/analyzer/main.py         | 233 ++++++++++++++--------------
 src/gallia/analyzer/xl_generator.py |  54 +++----
 3 files changed, 140 insertions(+), 152 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index e08e45b48..6f2e456c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,6 +49,10 @@ exitcode = "^0.1.0"
 psutil = ">=5.9.4,<7.0.0"
 httpx = ">=0.26,<0.28"
 more-itertools = "^10.3.0"
+numpy = "^1.21.4"
+openpyxl = "^3.0.9"
+pandas = "^1.3.4"
+matplotlib = "^3.4.3"
 
 [tool.poetry.group.dev.dependencies]
 Sphinx = ">=5.2,<8.0"
@@ -73,6 +77,7 @@ ruff = "^0.5.0"
 "netzteil" = "opennetzteil.cli:main"
 "cursed-hr" = "cursed_hr.cursed_hr:main"
 "hr" = "hr:main"
+"analyze" = "gallia.analyzer.main:AnalyzerMain"
 
 [tool.mypy]
 strict = true
diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py
index d8e6fe157..65ce6e0e4 100755
--- a/src/gallia/analyzer/main.py
+++ b/src/gallia/analyzer/main.py
@@ -2,6 +2,7 @@
 """
 gallia-analyze main script
 """
+from argparse import Namespace
 import sys
 import time
 import argparse
@@ -15,6 +16,7 @@ from gallia.analyzer.time_analyzer import TimeAnalyzer
 from gallia.analyzer.mode_config import LogMode
 from gallia.analyzer.arg_help import ArgHelp
+from gallia.udscan.core import Script
 
 # ========================================================== #
 # [Rule for arguments]
@@ -25,134 +27,125 @@
 # Parameter: one word lowercase(sometimes with dash)
 # ========================================================== #
 
-parser = argparse.ArgumentParser(usage=textwrap.dedent(ArgHelp.usage))
-
-# Commands
-grp_cmd = parser.add_argument_group("Command")
-grp_cmd.add_argument("-a", action="store_true", help=ArgHelp.analyze)
-grp_cmd.add_argument("-c", action="store_true", help=ArgHelp.clear)
-grp_cmd.add_argument("-e", action="store_true", help=ArgHelp.extract)
-grp_cmd.add_argument("-i", action="store_true", help=ArgHelp.aio_iden)
-grp_cmd.add_argument("-r", action="store_true", help=ArgHelp.report)
-grp_cmd.add_argument("-s", action="store_true", help=ArgHelp.aio_serv)
-grp_cmd.add_argument("-t", action="store_true", help=ArgHelp.time)
-
-# Options
-grp_opt = parser.add_argument_group("Option")
-grp_opt.add_argument("-A", action="store_true", help=ArgHelp.all_serv)
-grp_opt.add_argument("-D", action="store_true", help=ArgHelp.debug)
-grp_opt.add_argument("-I", action="store_true", help=ArgHelp.iso)
-grp_opt.add_argument("-L", action="store_true", help=ArgHelp.log)
-grp_opt.add_argument("-P", action="store_true", help=ArgHelp.possible)
-grp_opt.add_argument("-C", action="store_true", help=ArgHelp.cat)
-
-# Parameters
-grp_param = parser.add_argument_group("Parameter")
-grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1)
-grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0)
-grp_param.add_argument("--to", type=int, help=ArgHelp.last, default=0)
-grp_param.add_argument("--output", type=str, help=ArgHelp.output, default="")
-grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="")
-grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0)
-
-args = vars(parser.parse_args())
-
-# Commands
-analyze_on = args["a"]
-clear_on = args["c"]
-extract_on = args["e"]
-aio_identifier_on = args["i"]
-report_on = args["r"]
-aio_service_on = args["s"]
-t_analyze_on = args["t"]
-
-# Functional Options
-all_services_on = args["A"]
-debug_on = args["D"]
-iso_on = args["I"]
-log_file_on = args["L"]
-show_possible_on = args["P"]
-categorizer_on = args["C"]
-
-# Parameters
-service_id = args["sid"]
-db_path = args["source"]
-run_start = args["from"]
-run_end = args["to"] + 1
-file_path = args["output"]
-t_prec = args["precision"]
-
-if run_end <= run_start:
-    run_end = run_start + 1
-
-if db_path == "":
-    print("Please set database path with --source option!")
-    sys.exit()
-
-
-def main() -> None:
-    """
-    gallia-analyze: command line mode main function
-    """
-    start_time = time.process_time()
-
-    if log_file_on:
-        log_mode = LogMode.LOG_FILE
-    else:
-        log_mode = LogMode.STD_OUT
-
-    if run_start == 0 and run_end == 1:
-        operator = Operator(db_path)
-        runs_vec = operator.get_runs()
-    else:
-        runs_vec = np.arange(run_start, run_end)
-
-    if clear_on or extract_on:
-        extractor = Extractor(db_path, log_mode)
-
-    if clear_on:
-        extractor.clear()
-
-    if extract_on:
-        extractor.extract(runs_vec)
-
-    if analyze_on:
-        if categorizer_on:
-            analyzer = Categorizer(db_path, log_mode)
+class AnalyzerMain(Script):
+    def add_parser(self) -> None:
+        # Commands
+        grp_cmd = self.parser.add_argument_group("Command")
+        grp_cmd.add_argument("-a", action="store_true", help=ArgHelp.analyze)
+        grp_cmd.add_argument("-c", action="store_true", help=ArgHelp.clear)
+        grp_cmd.add_argument("-e", action="store_true", help=ArgHelp.extract)
+        grp_cmd.add_argument("-i", action="store_true", help=ArgHelp.aio_iden)
+        grp_cmd.add_argument("-r", action="store_true", help=ArgHelp.report)
+        grp_cmd.add_argument("-s", action="store_true", help=ArgHelp.aio_serv)
+        grp_cmd.add_argument("-t", action="store_true", help=ArgHelp.time)
+
+        # Options
+        grp_opt = self.parser.add_argument_group("Option")
+        grp_opt.add_argument("-A", action="store_true", help=ArgHelp.all_serv)
+        grp_opt.add_argument("-D", action="store_true", help=ArgHelp.debug)
+        grp_opt.add_argument("-I", action="store_true", help=ArgHelp.iso)
+        grp_opt.add_argument("-L", action="store_true", help=ArgHelp.log)
+        grp_opt.add_argument("-P", action="store_true", help=ArgHelp.possible)
+        grp_opt.add_argument("-C", action="store_true", help=ArgHelp.cat)
+
+        # Parameters
+        grp_param = self.parser.add_argument_group("Parameter")
+        grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1)
+        grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0)
+        grp_param.add_argument("--to", type=int, help=ArgHelp.last, default=0)
+        grp_param.add_argument("--output", type=str, help=ArgHelp.output, default="")
+        grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="")
+        grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0)
+
+    def main(self, args: Namespace) -> None:
+        args = vars(args)
+        # Commands
+        analyze_on = args["a"]
+        clear_on = args["c"]
+        extract_on = args["e"]
+        aio_identifier_on = args["i"]
+        report_on = args["r"]
+        aio_service_on = args["s"]
+        t_analyze_on = args["t"]
+
+        # Functional Options
all_services_on = args["A"] + debug_on = args["D"] + iso_on = args["I"] + log_file_on = args["L"] + show_possible_on = args["P"] + categorizer_on = args["C"] + + # Parameters + service_id = args["sid"] + db_path = args["source"] + run_start = args["from"] + run_end = args["to"] + 1 + file_path = args["output"] + t_prec = args["precision"] + + if run_end <= run_start: + run_end = run_start + 1 + + if db_path == "": + print("Please set database path with --source option!") + sys.exit() + + start_time = time.process_time() + + if log_file_on: + log_mode = LogMode.LOG_FILE else: - analyzer = Analyzer(db_path, log_mode, debug_on) - an_opt = analyzer.get_op_mode(iso_on) - analyzer.analyze(runs_vec, an_opt) + log_mode = LogMode.STD_OUT - if t_analyze_on: - if t_prec > 0: - time_analyzer = TimeAnalyzer(db_path, t_prec, log_mode) + if run_start == 0 and run_end == 1: + operator = Operator(db_path) + runs_vec = operator.get_runs() else: - time_analyzer = TimeAnalyzer(db_path, log_mode=log_mode) - time_analyzer.extract_tra(runs_vec) - time_analyzer.hist_tra(runs_vec) - time_analyzer.plot_tra(runs_vec) + runs_vec = np.arange(run_start, run_end) - if report_on or aio_service_on or aio_identifier_on: - reporter = Reporter(db_path, log_mode) + if clear_on or extract_on: + extractor = Extractor(db_path, log_mode) - if report_on: - reporter.report_xl(runs_vec, show_possible_on, file_path) + if clear_on: + extractor.clear() - if aio_service_on: - reporter.consolidate_xl_serv(file_path, show_possible_on) + if extract_on: + extractor.extract(runs_vec) - if aio_identifier_on: - if all_services_on: - reporter.iterate_all(file_path, show_possible_on) - else: - if service_id == -1: - print("Please input Service ID with --sid option.") + if analyze_on: + if categorizer_on: + analyzer = Categorizer(db_path, log_mode) + else: + analyzer = Analyzer(db_path, log_mode, debug_on) + an_opt = analyzer.get_op_mode(iso_on) + analyzer.analyze(runs_vec, an_opt) + + if t_analyze_on: + if t_prec > 0: + time_analyzer = TimeAnalyzer(db_path, t_prec, log_mode) else: - reporter.consolidate_xl_iden(service_id, file_path, show_possible_on) + time_analyzer = TimeAnalyzer(db_path, log_mode=log_mode) + time_analyzer.extract_tra(runs_vec) + time_analyzer.hist_tra(runs_vec) + time_analyzer.plot_tra(runs_vec) - print(f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}") + if report_on or aio_service_on or aio_identifier_on: + reporter = Reporter(db_path, log_mode) + if report_on: + reporter.report_xl(runs_vec, show_possible_on, file_path) + + if aio_service_on: + reporter.consolidate_xl_serv(file_path, show_possible_on) + + if aio_identifier_on: + if all_services_on: + reporter.iterate_all(file_path, show_possible_on) + else: + if service_id == -1: + print("Please input Service ID with --sid option.") + else: + reporter.consolidate_xl_iden(service_id, file_path, show_possible_on) -if __name__ == "__main__": - main() + print(f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}") diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 872cbdd27..8cd09dc95 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -129,7 +129,7 @@ def add_sum_sheet_iden( dft_err_df = self.get_dft_err_df_from_raw(raw_df) cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.IDEN, serv, sbfn_vec) cur_row, cur_col = self.sum_sheet_fill_index( - cur_row, cur_col, entries_vec, ScanMode.IDEN, sbfn_vec + cur_row, cur_col, 
+            cur_row, cur_col, raw_df[raw_df['identifier'].isin(entries_vec)], ScanMode.IDEN
         )
         cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df)
         cur_row, cur_col = self.sum_sheet_fill_resp(
@@ -189,55 +189,45 @@ def sum_sheet_fill_index(
         self,
         cur_row: int,
         cur_col: int,
-        entries_vec: np.ndarray,
+        entries_vec: pd.DataFrame,
         scan_mode: ScanMode,
-        sbfn_vec: np.ndarray = np.array([]),
     ) -> Tuple[int, int]:
         """
         fill index column in summary sheet.
         """
+        entries_vec = entries_vec.drop_duplicates(['subfunc', 'identifier'])
         try:
-            for entry in entries_vec:
+            for _, row in entries_vec.iterrows():
                 if scan_mode == ScanMode.SERV:
                     self.worksheet.cell(cur_row, cur_col).value = self.get_code_text(
                         entry, self.iso_serv_code_dict
                     )
                     cur_row += 1
                 if scan_mode == ScanMode.IDEN:
-                    if entry == -1:
+                    if row.identifier == -1:
                         index_name = CellCnt.no_ent
                     else:
-                        index_name = f"0x{int(entry):04X}"
-                    for sbfn in sbfn_vec:
-                        if sbfn_vec.size > 1:
-                            self.worksheet.cell(
-                                cur_row, self.start_col
-                            ).value = index_name
-                            self.worksheet.cell(
-                                cur_row, self.start_col + 1
-                            ).value = sbfn
-                            fill_color = self.get_gray_color(sbfn)
-                            self.worksheet.cell(
-                                cur_row, self.start_col + 1
-                            ).fill = PatternFill(
-                                start_color=fill_color,
-                                end_color=fill_color,
-                                fill_type="solid",
-                            )
-                            self.worksheet.cell(
-                                cur_row, self.start_col + 1
-                            ).alignment = Alignment(horizontal="center")
-                            cur_row += 1
-                        else:
-                            self.worksheet.cell(cur_row, cur_col).value = index_name
-                            cur_row += 1
+                        index_name = f"0x{int(row.identifier):04X}"
+                    self.worksheet.cell(
+                        cur_row, self.start_col
+                    ).value = index_name
+                    cur_row += 1
+                    if row.subfunc != -1:
+                        # service has subfunction and identifier
+                        self.worksheet.cell(
+                            cur_row, self.start_col + 1
+                        ).value = row.subfunc
+                        cur_row += 1
+
                 self.worksheet.cell(cur_row, cur_col).font = Font(
                     name=XlDesign.font_index
                 )
-            if sbfn_vec.size > 1:
-                cur_col += 2
-            else:
+            if -1 in entries_vec['subfunc']:
+                # has no sub function
                 cur_col += 1
+            else:
+                # has sub function and identifier
+                cur_col += 2
             cur_row = self.start_row
         except (KeyError, AttributeError) as exc:
             self.log("filling index of summary sheet failed", True, exc)
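
Patch 02 reshapes the CLI into a class registered as a poetry entry point ("analyze" = "gallia.analyzer.main:AnalyzerMain"). The base class gallia.udscan.core.Script is not shown in this series; the stand-in below is only a rough sketch of the expected contract (parser ownership, add_parser, main), and the run() entry point is a hypothetical name used for illustration:

    import argparse
    from argparse import Namespace

    class Script:  # stand-in for gallia.udscan.core.Script (assumed shape)
        def __init__(self) -> None:
            self.parser = argparse.ArgumentParser()
            self.add_parser()

        def add_parser(self) -> None: ...

        def main(self, args: Namespace) -> None: ...

        def run(self) -> None:  # hypothetical entry point
            self.main(self.parser.parse_args())
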
From db6748d59f6001d2c48f7d6ee735460862623a5d Mon Sep 17 00:00:00 2001
From: "Specht, Tobias"
Date: Tue, 5 Jul 2022 08:12:40 +0200
Subject: [PATCH 03/26] fix code style

---
 src/gallia/analyzer/__init__.py      |  1 -
 src/gallia/analyzer/analyzer.py      | 25 +++++++---------
 src/gallia/analyzer/arg_help.py      |  5 +---
 src/gallia/analyzer/categorizer.py   |  9 ++----
 src/gallia/analyzer/config.py        |  3 --
 src/gallia/analyzer/constants.py     |  3 --
 src/gallia/analyzer/db_handler.py    |  3 --
 src/gallia/analyzer/exceptions.py    |  3 --
 src/gallia/analyzer/extractor.py     | 45 +++++++++++++---------------
 src/gallia/analyzer/failure.py       |  3 --
 src/gallia/analyzer/main.py          | 11 ++++---
 src/gallia/analyzer/mode_config.py   |  3 --
 src/gallia/analyzer/name_config.py   |  2 --
 src/gallia/analyzer/operator.py      |  6 +---
 src/gallia/analyzer/reporter.py      |  5 +---
 src/gallia/analyzer/time_analyzer.py |  3 --
 src/gallia/analyzer/xl_generator.py  | 20 ++++++-------
 17 files changed, 54 insertions(+), 96 deletions(-)

diff --git a/src/gallia/analyzer/__init__.py b/src/gallia/analyzer/__init__.py
index 8b1378917..e69de29bb 100644
--- a/src/gallia/analyzer/__init__.py
+++ b/src/gallia/analyzer/__init__.py
@@ -1 +0,0 @@
-
diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py
index 17dabc6a2..5ef5d9fc9 100644
--- a/src/gallia/analyzer/analyzer.py
+++ b/src/gallia/analyzer/analyzer.py
@@ -14,9 +14,6 @@ from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode
 from gallia.analyzer.name_config import ColNm, KyNm, TblNm, VwNm, NEG_STR
 
-if __name__ == "__main__":
-    exit()
-
 
 class Analyzer(Operator):
     """
@@ -144,26 +141,26 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool:
             return False
         create_view_sql = f"""
             DROP VIEW IF EXISTS "{VwNm.sess_alwd}";
-            CREATE VIEW "{VwNm.sess_alwd}" 
-            AS SELECT "{ColNm.sess}" 
+            CREATE VIEW "{VwNm.sess_alwd}"
+            AS SELECT "{ColNm.sess}"
             FROM "{TblNm.ven_lu}" WHERE "{ColNm.serv}" = {serv}
             GROUP BY "{ColNm.sess}";
             DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}";
-            CREATE VIEW "{VwNm.sbfn_alwd}" 
-            AS SELECT "{ColNm.sbfn}" 
+            CREATE VIEW "{VwNm.sbfn_alwd}"
+            AS SELECT "{ColNm.sbfn}"
             FROM "{TblNm.ven_lu}" WHERE "{ColNm.serv}" = {serv}
-            GROUP BY "{ColNm.sbfn}"; 
+            GROUP BY "{ColNm.sbfn}";
             DROP VIEW IF EXISTS "{VwNm.resp_alwd}";
-            CREATE VIEW "{VwNm.resp_alwd}" 
-            AS SELECT "{ColNm.resp}" 
+            CREATE VIEW "{VwNm.resp_alwd}"
+            AS SELECT "{ColNm.resp}"
             FROM "{TblNm.ref_resp}" WHERE "{ColNm.serv}" = {serv}
-            GROUP BY "{ColNm.resp}"; 
+            GROUP BY "{ColNm.resp}";
             DROP VIEW IF EXISTS "{VwNm.ref_vw}";
-            CREATE VIEW "{VwNm.ref_vw}" 
-            AS SELECT "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}", 
+            CREATE VIEW "{VwNm.ref_vw}"
+            AS SELECT "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}",
                 "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.ecu_mode}"
             FROM "{TblNm.ven_lu}" WHERE "{ColNm.serv}" = {serv};
         """
@@ -185,7 +182,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool:
                     self.log("condition key reading failed", True, exc)
         drop_view_sql = f"""
             DROP VIEW IF EXISTS "{VwNm.sess_alwd}";
-            DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}"; 
+            DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}";
             DROP VIEW IF EXISTS "{VwNm.resp_alwd}";
             DROP VIEW IF EXISTS "{VwNm.ref_vw}";
         """
diff --git a/src/gallia/analyzer/arg_help.py b/src/gallia/analyzer/arg_help.py
index 3c2ef29d2..848b18325 100644
--- a/src/gallia/analyzer/arg_help.py
+++ b/src/gallia/analyzer/arg_help.py
@@ -2,9 +2,6 @@
 gallia-analyze module for argument help texts
 """
 
-if __name__ == "__main__":
-    exit()
-
 
 class ArgHelp:
     """
@@ -35,7 +32,7 @@ class for argument help text
     Clear all analyzed data in database.
     """
     extract = """
-    Extract JSON data, etc. from database and store into relational database. 
+    Extract JSON data, etc. from database and store into relational database.
    """
     aio_iden = """
     Consolidate all scan_identifier runs into one EXCEL file sorted by ECU mode for a certain Service ID.
diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py
index 549cdd43a..ae90e69bd 100644
--- a/src/gallia/analyzer/categorizer.py
+++ b/src/gallia/analyzer/categorizer.py
@@ -13,9 +13,6 @@ from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException
 from gallia.uds.core.constants import UDSIsoServices, UDSErrorCodes
 
-if __name__ == "__main__":
-    exit()
-
 
 class Categorizer(Analyzer):
     """
@@ -136,7 +133,7 @@ def check_sess_alwd(self, serv: int, sess: int, op_mode: OpMode, ecu_mode) -> bo
             ref_df = self.ref_ven_df[ecu_mode]
         if op_mode == OpMode.ISO:
             ref_df = self.ref_iso_df
-        if not serv in ref_df.index:
+        if serv not in ref_df.index:
             return False
         return sess in ref_df.loc[serv, ColNm.sess]
 
@@ -144,7 +141,7 @@ def check_resp_alwd(self, serv: int, resp: int) -> bool:
         """
         check if a certain response is available or supported for a certain service.
""" - if not serv in list(self.ref_iso_df.index): + if serv not in list(self.ref_iso_df.index): return False return ( resp @@ -161,7 +158,7 @@ def check_sbfn_alwd(self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode) -> bo ref_df = self.ref_ven_df[ecu_mode] if op_mode == OpMode.ISO: ref_df = self.ref_iso_df - if not serv in ref_df.index: + if serv not in ref_df.index: return False return sbfn in ref_df.loc[serv, ColNm.sbfn] diff --git a/src/gallia/analyzer/config.py b/src/gallia/analyzer/config.py index c0fb5cf8f..1398ed05e 100644 --- a/src/gallia/analyzer/config.py +++ b/src/gallia/analyzer/config.py @@ -7,9 +7,6 @@ from gallia.analyzer.name_config import ColNm from gallia.analyzer.constants import SqlDataType -if __name__ == "__main__": - exit() - def load_resource_file(path: str) -> str: """ diff --git a/src/gallia/analyzer/constants.py b/src/gallia/analyzer/constants.py index c2f9abdde..1f5aadc53 100644 --- a/src/gallia/analyzer/constants.py +++ b/src/gallia/analyzer/constants.py @@ -3,9 +3,6 @@ """ from enum import IntEnum -if __name__ == "__main__": - exit() - class UDSIsoSessions(IntEnum): """ diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py index 6688eadb5..4bba207f8 100644 --- a/src/gallia/analyzer/db_handler.py +++ b/src/gallia/analyzer/db_handler.py @@ -10,9 +10,6 @@ from gallia.analyzer.mode_config import LogMode from gallia.analyzer.name_config import ColNm -if __name__ == "__main__": - exit() - class DatabaseHandler: """ diff --git a/src/gallia/analyzer/exceptions.py b/src/gallia/analyzer/exceptions.py index 2a66075b6..3b9ee6d9f 100644 --- a/src/gallia/analyzer/exceptions.py +++ b/src/gallia/analyzer/exceptions.py @@ -2,9 +2,6 @@ gallia-analyze Exceptions module """ -if __name__ == "__main__": - exit() - class EmptyTableException(Exception): """ diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index 70d2263cd..0584db948 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -9,9 +9,6 @@ from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import TblNm, ColNm, VwNm -if __name__ == "__main__": - exit() - class Extractor(Operator): @@ -72,24 +69,24 @@ def extract_serv(self, run: int) -> bool: WHEN json_extract("response_data", '$.response_code') IS NULL THEN -1 ELSE json_extract("response_data", '$.response_code') END "{ColNm.resp}" - FROM "{TblNm.scan_result}" WHERE "{ColNm.run}" = {str(run)} + FROM "{TblNm.scan_result}" WHERE "{ColNm.run}" = {str(run)} AND "log_mode" = "explicit" OR "log_mode" = "emphasized"; - INSERT INTO "{TblNm.serv}" ("{ColNm.id}", "{ColNm.run}", + INSERT INTO "{TblNm.serv}" ("{ColNm.id}", "{ColNm.run}", "{ColNm.t_rqst}", "{ColNm.t_resp}", - "{ColNm.ecu_mode}", "{ColNm.serv}", + "{ColNm.ecu_mode}", "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}", "{ColNm.resp}") SELECT "{ColNm.id}", "{ColNm.run}", "{ColNm.t_rqst}", "{ColNm.t_resp}", - CASE WHEN "{ColNm.ecu_mode}" IS NULL THEN 0 + CASE WHEN "{ColNm.ecu_mode}" IS NULL THEN 0 ELSE "{ColNm.ecu_mode}" END "{ColNm.ecu_mode}", - "{ColNm.serv}", "{ColNm.sess}", + "{ColNm.serv}", "{ColNm.sess}", CASE WHEN "{ColNm.boot}" IS NULL AND "{ColNm.sess}" = 2 THEN 1 WHEN "{ColNm.boot}" IS NULL THEN 0 ELSE "{ColNm.boot}" END "{ColNm.boot}", "{ColNm.resp}" - FROM "{VwNm.resp_vw}" + FROM "{VwNm.resp_vw}" LEFT JOIN "{TblNm.meta}" ON "{TblNm.meta}"."{ColNm.run_id}" = "{VwNm.resp_vw}"."{ColNm.run}"; UPDATE "{TblNm.serv}" SET "{ColNm.fail}" = 255; @@ -116,24 +113,24 @@ def extract_iden(self, run: 
             return False
         extract_sql = f"""
             DROP VIEW IF EXISTS "{VwNm.resp_vw}";
-            CREATE VIEW "{VwNm.resp_vw}" 
+            CREATE VIEW "{VwNm.resp_vw}"
             AS SELECT "{ColNm.id}", "{ColNm.run}", "{ColNm.t_rqst}", "{ColNm.t_resp}",
             json_extract("request_data", '$.service_id') AS "{ColNm.serv}",
             json_extract("state", '$.session') AS "{ColNm.sess}",
             json_extract("state", '$.boot') AS "{ColNm.boot}",
-            CASE WHEN json_extract("request_data", '$.service_id') = 49 
+            CASE WHEN json_extract("request_data", '$.service_id') = 49
             THEN json_extract("request_data", '$.sub_function')
             ELSE -1
             END "{ColNm.sbfn}",
-            CASE WHEN json_extract("request_data", '$.service_id') = 49 
+            CASE WHEN json_extract("request_data", '$.service_id') = 49
             THEN json_extract("request_data", '$.routine_identifier')
-            WHEN json_extract("request_data", '$.service_id') = 39 
+            WHEN json_extract("request_data", '$.service_id') = 39
             AND json_extract("request_data", '$.sub_function') IS NULL THEN -1
-            WHEN json_extract("request_data", '$.service_id') = 39 
+            WHEN json_extract("request_data", '$.service_id') = 39
             THEN json_extract("request_data", '$.sub_function')
-            WHEN json_extract("request_data", '$.data_identifier') IS NULL 
-            THEN json_extract("request_data", '$.data_identifiers[0]') 
+            WHEN json_extract("request_data", '$.data_identifier') IS NULL
+            THEN json_extract("request_data", '$.data_identifiers[0]')
             ELSE json_extract("request_data", '$.data_identifier')
             END "{ColNm.iden}",
             json_extract("request_data", '$.identifier') AS "{ColNm.iden}",
             CASE WHEN json_extract("response_data", '$.response_code') IS NULL THEN -1
             ELSE json_extract("response_data", '$.response_code')
             END "{ColNm.resp}"
-            FROM "{TblNm.scan_result}" 
+            FROM "{TblNm.scan_result}"
             WHERE "{ColNm.run}" = {str(run)}
             AND "log_mode" = "explicit" OR "log_mode" = "emphasized";
-            INSERT INTO "{TblNm.iden}" ("{ColNm.id}", "{ColNm.run}", 
-            "{ColNm.t_rqst}", "{ColNm.t_resp}", "{ColNm.ecu_mode}", 
+            INSERT INTO "{TblNm.iden}" ("{ColNm.id}", "{ColNm.run}",
+            "{ColNm.t_rqst}", "{ColNm.t_resp}", "{ColNm.ecu_mode}",
             "{ColNm.serv}", "{ColNm.sess}", "{ColNm.boot}",
             "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.resp}")
             SELECT "{ColNm.id}", "{ColNm.run}",
-            "{ColNm.t_rqst}", "{ColNm.t_resp}", 
+            "{ColNm.t_rqst}", "{ColNm.t_resp}",
             CASE WHEN "{ColNm.ecu_mode}" IS NULL THEN 0
             ELSE "{ColNm.ecu_mode}"
-            END "{ColNm.ecu_mode}", 
+            END "{ColNm.ecu_mode}",
             "{ColNm.serv}", "{ColNm.sess}",
             CASE WHEN "{ColNm.boot}" IS NULL AND "{ColNm.sess}" = 2 THEN 1
             WHEN "{ColNm.boot}" IS NULL THEN 0
             ELSE "{ColNm.boot}"
             END "{ColNm.boot}",
-            "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.resp}" 
-            FROM "{VwNm.resp_vw}" 
+            "{ColNm.sbfn}", "{ColNm.iden}", "{ColNm.resp}"
+            FROM "{VwNm.resp_vw}"
             LEFT JOIN "{TblNm.meta}"
             ON "{TblNm.meta}"."{ColNm.run_id}" = "{VwNm.resp_vw}"."{ColNm.run}";
             UPDATE "{TblNm.iden}" SET "{ColNm.fail}" = 255;
@@ -176,7 +173,7 @@ def extract_iden(self, run: int) -> bool:
 
     def check_boot(self, run: int) -> bool:
         try:
             check_sql = f"""
-            SELECT json_extract("state", '$.boot') as "{ColNm.boot}" 
+            SELECT json_extract("state", '$.boot') as "{ColNm.boot}"
             FROM "{TblNm.scan_result}" WHERE "{ColNm.run}" = {str(run)};
             """
             boot_df = self.get_df_by_query(check_sql)
diff --git a/src/gallia/analyzer/failure.py b/src/gallia/analyzer/failure.py
index cccebcb20..fb5f46a1d 100644
--- a/src/gallia/analyzer/failure.py
+++ b/src/gallia/analyzer/failure.py
@@ -3,9 +3,6 @@
 """
 from enum import IntEnum
 
-if __name__ == "__main__":
-    exit()
-
 
 class Failure(IntEnum):
     """
diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py
index 65ce6e0e4..3840894ff 100755
--- a/src/gallia/analyzer/main.py
+++ b/src/gallia/analyzer/main.py
@@ -5,8 +5,6 @@ from argparse import Namespace
 import sys
 import time
-import argparse
-import textwrap
 import numpy as np
 from gallia.analyzer.operator import Operator
 from gallia.analyzer.analyzer import Analyzer
@@ -27,6 +25,7 @@
 # Parameter: one word lowercase(sometimes with dash)
 # ========================================================== #
 
+
 class AnalyzerMain(Script):
     def add_parser(self) -> None:
         # Commands
@@ -146,6 +145,10 @@ def main(self, args: Namespace) -> None:
                     if service_id == -1:
                         print("Please input Service ID with --sid option.")
                     else:
-                        reporter.consolidate_xl_iden(service_id, file_path, show_possible_on)
+                        reporter.consolidate_xl_iden(
+                            service_id, file_path, show_possible_on
+                        )
 
-        print(f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}")
+        print(
+            f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}"
+        )
diff --git a/src/gallia/analyzer/mode_config.py b/src/gallia/analyzer/mode_config.py
index e0754beb5..c1f2642e4 100644
--- a/src/gallia/analyzer/mode_config.py
+++ b/src/gallia/analyzer/mode_config.py
@@ -4,9 +4,6 @@
 
 from enum import IntEnum
 
-if __name__ == "__main__":
-    exit()
-
 
 class ScanMode(IntEnum):
     """
diff --git a/src/gallia/analyzer/name_config.py b/src/gallia/analyzer/name_config.py
index d38860b3a..654124a05 100644
--- a/src/gallia/analyzer/name_config.py
+++ b/src/gallia/analyzer/name_config.py
@@ -1,8 +1,6 @@
 """
 gallia-analyze Name Config module
 """
-if __name__ == "__main__":
-    exit()
 
 NEG_STR = "NOT "
 
diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py
index 17d5fd52a..1d55d254e 100644
--- a/src/gallia/analyzer/operator.py
+++ b/src/gallia/analyzer/operator.py
@@ -20,10 +20,6 @@
 from gallia.uds.core.constants import UDSErrorCodes, UDSIsoServices
 
-if __name__ == "__main__":
-    exit()
-
-
 class Operator(DatabaseHandler):
     """
     Class for common basic operations and utilities such as loading meta data of runs,
@@ -253,7 +249,7 @@ def load_meta(self, force: bool = False) -> bool:
                 "script" AS "{ColNm.scan_mode}" FROM "{TblNm.run_meta}";
                 DROP TABLE IF EXISTS "{TblNm.meta}";
-                CREATE TABLE "{TblNm.meta}" 
+                CREATE TABLE "{TblNm.meta}"
                 AS SELECT "{ColNm.run_id}", "{ColNm.ecu_mode}", "{ColNm.scan_mode}"
                 FROM "{VwNm.ecu_vw}" INNER JOIN "{VwNm.mode_vw}"
diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py
index aa352e17b..de05b31ec 100644
--- a/src/gallia/analyzer/reporter.py
+++ b/src/gallia/analyzer/reporter.py
@@ -12,9 +12,6 @@ from gallia.analyzer.name_config import ColNm, TblNm
 from gallia.analyzer.exceptions import ColumnMismatchException, EmptyTableException
 
-if __name__ == "__main__":
-    exit()
-
 
 class Reporter(Operator):
     """
@@ -89,7 +86,7 @@ def consolidate_xl_iden(
         consolidate all scan_identifier runs sorted by ECU mode
         for a certain given service into one EXCEL file.
""" - if not serv in self.iso_serv_by_iden_vec: + if serv not in self.iso_serv_by_iden_vec: self.log("given Service ID is not service by identifier.") return False if not self.load_meta(force=True): diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py index 0388f5bfc..8d45fd4dc 100644 --- a/src/gallia/analyzer/time_analyzer.py +++ b/src/gallia/analyzer/time_analyzer.py @@ -12,9 +12,6 @@ from gallia.analyzer.mode_config import ScanMode, LogMode from gallia.analyzer.name_config import ColNm, TblNm, KyNm -if __name__ == "__main__": - exit() - class TimeAnalyzer(Reporter): """ diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 8cd09dc95..98cbb0a74 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -23,9 +23,6 @@ from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import ColNm, ShtNm, CellCnt, KyNm -if __name__ == "__main__": - exit() - class ExcelGenerator(Operator): @@ -129,7 +126,10 @@ def add_sum_sheet_iden( dft_err_df = self.get_dft_err_df_from_raw(raw_df) cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.IDEN, serv, sbfn_vec) cur_row, cur_col = self.sum_sheet_fill_index( - cur_row, cur_col, raw_df[raw_df['identifier'].isin(entries_vec)], ScanMode.IDEN + cur_row, + cur_col, + raw_df[raw_df["identifier"].isin(entries_vec)], + ScanMode.IDEN, ) cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df) cur_row, cur_col = self.sum_sheet_fill_resp( @@ -195,12 +195,12 @@ def sum_sheet_fill_index( """ fill index column in summary sheet. """ - entries_vec = entries_vec.drop_duplicates(['subfunc', 'identifier']) + entries_vec = entries_vec.drop_duplicates(["subfunc", "identifier"]) try: for _, row in entries_vec.iterrows(): if scan_mode == ScanMode.SERV: self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( - entry, self.iso_serv_code_dict + row.identifier, self.iso_serv_code_dict ) cur_row += 1 if scan_mode == ScanMode.IDEN: @@ -208,9 +208,7 @@ def sum_sheet_fill_index( index_name = CellCnt.no_ent else: index_name = f"0x{int(row.identifier):04X}" - self.worksheet.cell( - cur_row, self.start_col - ).value = index_name + self.worksheet.cell(cur_row, self.start_col).value = index_name cur_row += 1 if row.subfunc != -1: # service has subfunction and identifer @@ -222,7 +220,7 @@ def sum_sheet_fill_index( self.worksheet.cell(cur_row, cur_col).font = Font( name=XlDesign.font_index ) - if -1 in entries_vec['subfunc']: + if -1 in entries_vec["subfunc"]: # has no sub function cur_col += 1 else: @@ -374,7 +372,7 @@ def add_failure_sheet( + CellCnt.sess_unscn ) if sess_lu_vec.size > 0: - if not sess in sess_lu_vec: + if sess not in sess_lu_vec: self.worksheet.cell(cur_row, cur_col).value = ( str(self.worksheet.cell(cur_row, cur_col).value) + "\n" From 234d48077e3ce82b93a97cdcbc8340b22ab49850 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 5 Jul 2022 08:19:52 +0200 Subject: [PATCH 04/26] add type stubs --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 6f2e456c8..1b1217e4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,8 @@ reuse = "^4.0" construct-typing = ">=0.5.2,<0.7.0" pytest-cov = ">=4,<6" ruff = "^0.5.0" +pandas-stubs = "^1.4.3" +openpyxl-stubs = "^0.1.21" [tool.poetry.scripts] "gallia" = "gallia.cli:main" From b89106572f189a00b1a4c653e863af9cbe74e0c2 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 5 Jul 2022 08:45:57 
From b89106572f189a00b1a4c653e863af9cbe74e0c2 Mon Sep 17 00:00:00 2001
From: "Specht, Tobias"
Date: Tue, 5 Jul 2022 08:45:57 +0200
Subject: [PATCH 05/26] fix typing

---
 src/gallia/analyzer/categorizer.py  |  8 +++--
 src/gallia/analyzer/exceptions.py   |  4 +--
 src/gallia/analyzer/operator.py     | 54 ++++++++++-------------------
 src/gallia/analyzer/reporter.py     |  6 ++--
 src/gallia/analyzer/xl_generator.py | 18 +++++-----
 5 files changed, 37 insertions(+), 53 deletions(-)

diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py
index ae90e69bd..e7f312481 100644
--- a/src/gallia/analyzer/categorizer.py
+++ b/src/gallia/analyzer/categorizer.py
@@ -95,7 +95,7 @@ def categorize_iden(
         categorize failures for scan_identifier.
         """
         try:
-            serv_vec = pd.unique(raw_df[ColNm.serv])
+            serv_vec = np.unique(raw_df[ColNm.serv])
             if not serv_vec.size == 1:
                 self.log("more than one service in a run", True)
                 return pd.DataFrame()
@@ -124,7 +124,7 @@ def categorize_iden(
             return pd.DataFrame()
         return raw_df
 
-    def check_sess_alwd(self, serv: int, sess: int, op_mode: OpMode, ecu_mode) -> bool:
+    def check_sess_alwd(self, serv: int, sess: int, op_mode: OpMode, ecu_mode: int) -> bool:
         """
         check if a certain diagnostic session is available or supported
         for a certain service at given analysis mode.
@@ -149,7 +149,7 @@ def check_resp_alwd(self, serv: int, resp: int) -> bool:
             + self.iso_supp_err_for_all_vec.tolist()
         )
 
-    def check_sbfn_alwd(self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode) -> bool:
+    def check_sbfn_alwd(self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode: int) -> bool:
         """
         check if a certain sub-function is available or supported
         for a certain service at given analysis mode.
@@ -275,6 +275,8 @@ def get_fail_iden(
             supp_serv_vec = self.supp_serv_ven_vec
         if op_mode == OpMode.ISO:
             supp_serv_vec = self.supp_serv_iso_vec
+        else:
+            raise RuntimeError(f'Unsupported op_mode: {op_mode}')
 
         cond_serv_supp = serv in supp_serv_vec
         cond_resp_alwd = self.check_resp_alwd(serv, resp)
diff --git a/src/gallia/analyzer/exceptions.py b/src/gallia/analyzer/exceptions.py
index 3b9ee6d9f..8d767a237 100644
--- a/src/gallia/analyzer/exceptions.py
+++ b/src/gallia/analyzer/exceptions.py
@@ -8,7 +8,7 @@ class EmptyTableException(Exception):
     exception class for empty table error
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__("Empty Table.")
 
 
@@ -17,5 +17,5 @@ class ColumnMismatchException(Exception):
     exception class for column mismatch
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__("Columns Mismatch.")
diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py
index 1d55d254e..0a71e4f01 100644
--- a/src/gallia/analyzer/operator.py
+++ b/src/gallia/analyzer/operator.py
@@ -3,9 +3,9 @@
 """
 import json
 from json.decoder import JSONDecodeError
-from typing import Tuple
 from sqlite3 import OperationalError
 from itertools import chain
+from typing import cast
 import numpy as np
 import pandas as pd
 from pandas.core.indexing import IndexingError
@@ -35,8 +35,8 @@ def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT):
         self.ref_ven_df = pd.DataFrame()
         self.supp_serv_ven_vec = np.array([])
         self.sess_code_vec = np.array([])
-        self.sess_code_dict = dict()
-        self.sess_name_dict = dict()
+        self.sess_code_dict: dict[int, str] = {}
+        self.sess_name_dict: dict[str, int] = {}
         self.load_all_dicts()
         if self.connect_db():
             self.load_ref_iso()
@@ -58,7 +58,7 @@ def get_runs(self) -> np.ndarray:
         get a numpy array of all runs in the database.
""" if self.load_meta(force=True): - return self.run_meta_df.index + return self.run_meta_df.index.to_numpy() return np.array([]) def get_scan_mode(self, run: int) -> ScanMode: @@ -88,10 +88,10 @@ def get_sid(self, run: int) -> int: return -1 raw_df = self.read_run_db(TblNm.iden, run) self.check_df(raw_df, TblStruct.iden) - serv_vec = pd.unique(raw_df[ColNm.serv]) + serv_vec = np.unique(raw_df[ColNm.serv]) if serv_vec.shape[0] > 1: self.log("A run has more than one Service ID.", True) - serv_ser = raw_df[ColNm.serv].mode() + serv_ser = raw_df[ColNm.serv].mode(dropna=True) if serv_ser.shape[0] > 1: self.log("A run has more than one most frequent Service ID.", True) except ( @@ -112,7 +112,7 @@ def get_ecu_mode(self, run: int) -> int: if not self.load_meta(): return -1 try: - ecu_mode = self.run_meta_df.loc[run, ColNm.ecu_mode] + ecu_mode = cast(int, self.run_meta_df.loc[run, ColNm.ecu_mode]) return ecu_mode except (KeyError, IndexingError, AttributeError) as exc: self.log("getting ECU mode failed", True, exc) @@ -135,7 +135,7 @@ def get_sess_lu(self) -> np.ndarray: try: lu_df = self.read_db(TblNm.ven_lu) self.check_df(lu_df, TblStruct.ven_lu) - sess_vec = pd.unique(lu_df[ColNm.sess]) + sess_vec = np.unique(lu_df[ColNm.sess]) except ( KeyError, IndexingError, @@ -173,36 +173,18 @@ def get_ref_df_from_json(self, path: str) -> pd.DataFrame: return pd.DataFrame() return ref_df - def get_dft_err_df(self, run: int) -> Tuple[pd.DataFrame, np.ndarray]: - """ - get data frame that shows most common error(default error) - for each diagnostic session regarding a run. - """ - try: - scan_mode = self.get_scan_mode(run) - if scan_mode == ScanMode.SERV: - raw_df = self.read_run_db(TblNm.serv, run) - self.check_df(raw_df, TblStruct.serv) - else: - raw_df = self.read_run_db(TblNm.iden, run) - self.check_df(raw_df, TblStruct.iden) - except (EmptyTableException, ColumnMismatchException) as exc: - self.log("getting default error data frame failed", True, exc) - return pd.DataFrame() - return self.get_dft_err_df_from_raw(raw_df) - def get_dft_err_df_from_raw(self, raw_df: pd.DataFrame) -> pd.DataFrame: """ get summarized data frame that shows most common error(default error) for each diagnostic session from raw data frame. 
""" try: - sess_vec = pd.unique(raw_df[ColNm.sess]) + sess_vec = np.unique(raw_df[ColNm.sess]) dft_err_df = pd.DataFrame([], index=[ColNm.dft], columns=sess_vec) for sess in sess_vec: cond = raw_df[ColNm.sess] == sess dft_err_df.loc[ColNm.dft, sess] = raw_df.loc[cond, ColNm.resp].mode()[0] - dft_err_df.attrs[ColNm.serv] = list(pd.unique(raw_df[ColNm.serv])) + dft_err_df.attrs[ColNm.serv] = list(np.unique(raw_df[ColNm.serv])) except ( KeyError, IndexingError, @@ -224,7 +206,7 @@ def get_pos_res(self, search_id: int) -> str: FROM "{TblNm.scan_result}" WHERE "{ColNm.id}" = {str(search_id)}; """ res_df = self.get_df_by_query(res_sql) - resp = res_df.iloc[0, 0] + resp = cast(str, res_df.iloc[0, 0]) except (KeyError, IndexingError, AttributeError) as exc: self.log("getting positive response failed", True, exc) return "" @@ -281,7 +263,7 @@ def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bo try: lu_df = self.read_db(TblNm.ven_lu) self.check_df(lu_df, TblStruct.ven_lu) - supp_serv_vec = np.sort(pd.unique(lu_df[ColNm.serv])) + supp_serv_vec = np.sort(np.unique(lu_df[ColNm.serv])) mode_vec = np.arange(num_modes) ven_lu_dict = {} self.num_modes = 0 @@ -295,21 +277,21 @@ def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bo for serv in supp_serv_vec: sess_ls = list( np.sort( - pd.unique( + np.unique( loi_df.loc[loi_df[ColNm.serv] == serv, ColNm.sess] ) ) ) sbfn_ls = list( np.sort( - pd.unique( + np.unique( loi_df.loc[loi_df[ColNm.serv] == serv, ColNm.sbfn] ) ) ) iden_ls = list( np.sort( - pd.unique( + np.unique( loi_df.loc[ loi_df[ColNm.serv] == serv, ColNm.iden, @@ -323,7 +305,7 @@ def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bo ) ven_lu_dict[mode] = ref_df.T ven_lu_df = pd.concat(ven_lu_dict.values(), axis=1, keys=ven_lu_dict.keys()) - self.ref_ven_df: pd.DataFrame = ven_lu_df + self.ref_ven_df = ven_lu_df self.supp_serv_ven_vec = np.sort(np.array(ven_lu_df.index)) except ( KeyError, @@ -413,7 +395,7 @@ def load_lu_iden(self, serv: int, ecu_mode: int) -> bool: (raw_df[ColNm.serv] == serv) & (raw_df[ColNm.ecu_mode] == ecu_mode) ].copy() self.lu_iden_df = pd.DataFrame( - pd.unique( + np.unique( list( zip( serv_df[ColNm.sess], @@ -503,7 +485,7 @@ def prepare_alwd_sess_boot( ven_lu_df[ColNm.combi] = list( zip(ven_lu_df[ColNm.serv], ven_lu_df[ColNm.sess], ven_lu_df[ColNm.boot]) ) - entries_vec = pd.unique(ven_lu_df[ColNm.combi]) + entries_vec = np.unique(ven_lu_df[ColNm.combi]) for entry in entries_vec: pair_ls.append((entry[0], entry[1], entry[2])) pair_df = pd.DataFrame( diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index de05b31ec..4166a0867 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -306,7 +306,7 @@ def load_sid_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool: ) if ecu_mode != -1: cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode - self.abn_serv_vec = np.sort(pd.unique(raw_df.loc[cond_abn, ColNm.serv])) + self.abn_serv_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.serv])) except (KeyError, IndexingError, AttributeError) as exc: self.log("loading services of interest from data frame failed", True, exc) return False @@ -331,7 +331,7 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool load identifiers of interest from input raw data frame. 
""" try: - serv_vec = np.sort(pd.unique(raw_df[ColNm.serv])) + serv_vec = np.sort(np.unique(raw_df[ColNm.serv])) if not serv_vec.size == 1: self.log("more than one service in a run", True) return False @@ -348,7 +348,7 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool ) if ecu_mode != -1: cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode - self.abn_iden_vec = np.sort(pd.unique(raw_df.loc[cond_abn, ColNm.iden])) + self.abn_iden_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.iden])) except (KeyError, IndexingError, AttributeError) as exc: self.log( "loading identifiers of interest from data frame failed", True, exc diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 98cbb0a74..253ed66e9 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -32,7 +32,7 @@ class ExcelGenerator(Operator): def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): Operator.__init__(self, path, log_mode) self.msg_head = "[ExcelGenerator] " - self.workbook = self.workbook = op.Workbook() + self.workbook: op.Workbook = op.Workbook() self.worksheet: Any self.load_color_code(SrcPath.err_src) @@ -77,7 +77,7 @@ def save_close_xl(self, out_path: str) -> bool: except (InvalidFileException, WorkbookAlreadySaved) as exc: self.log("saving EXCEL failed", True, exc) return False - return + return True def add_sum_sheet_serv( self, raw_df: pd.DataFrame, entries_vec: np.ndarray, sheet_name: str = "" @@ -121,8 +121,8 @@ def add_sum_sheet_iden( try: self.worksheet = self.workbook.create_sheet(sheet_name) ref_col = ColNm.iden - serv = pd.unique(raw_df[ColNm.serv])[0] - sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + serv = np.unique(raw_df[ColNm.serv])[0] + sbfn_vec = np.sort(np.unique(raw_df[ColNm.sbfn])) dft_err_df = self.get_dft_err_df_from_raw(raw_df) cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.IDEN, serv, sbfn_vec) cur_row, cur_col = self.sum_sheet_fill_index( @@ -288,7 +288,7 @@ def sum_sheet_fill_resp( if scan_mode == ScanMode.SERV: sbfn_vec = np.arange(1) if scan_mode == ScanMode.IDEN: - sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + sbfn_vec = np.sort(np.unique(raw_df[ColNm.sbfn])) for sess in sess_vec: if dft_err_df[sess][0] == -1: continue @@ -344,7 +344,7 @@ def add_failure_sheet( width = XlDesign.dim_wide if scan_mode == ScanMode.IDEN: fail_vec = np.array([Failure.UNDOC_IDEN, Failure.MISS_IDEN]) - sbfn_vec = np.sort(pd.unique(raw_df[ColNm.sbfn])) + sbfn_vec = np.sort(np.unique(raw_df[ColNm.sbfn])) width = XlDesign.dim_middle cur_row = self.start_row cur_col = self.start_col @@ -390,7 +390,7 @@ def add_failure_sheet( lambda x, fl=fail: self.check_fail(x, fl) ) & (raw_df[ColNm.sess] == sess) if scan_mode == ScanMode.SERV: - serv_vec = np.sort(pd.unique(raw_df.loc[cond, ColNm.serv])) + serv_vec = np.sort(np.unique(raw_df.loc[cond, ColNm.serv])) for serv in serv_vec: self.worksheet.cell( cur_row, cur_col @@ -406,7 +406,7 @@ def add_failure_sheet( zip(raw_df[ColNm.iden], raw_df[ColNm.sbfn]) ) iden_sbfn_vec = np.sort( - pd.unique(raw_df.loc[cond, ColNm.combi]) + np.unique(raw_df.loc[cond, ColNm.combi]) ) for iden_sbfn in iden_sbfn_vec: iden = iden_sbfn[0] @@ -422,7 +422,7 @@ def add_failure_sheet( cur_row += 1 cur_col += 1 else: - iden_vec = np.sort(pd.unique(raw_df.loc[cond, ColNm.iden])) + iden_vec = np.sort(np.unique(raw_df.loc[cond, ColNm.iden])) for iden in iden_vec: if iden == -1: entry = CellCnt.no_ent From cb1ccbbcea8179b1db790b0fba7628d5acf05aa0 Mon Sep 17 00:00:00 
2001 From: "Specht, Tobias" Date: Mon, 11 Jul 2022 11:01:50 +0200 Subject: [PATCH 06/26] mark analyze scan-services with OpMode.VEN_SPEC as NotImplemented --- src/gallia/analyzer/analyzer.py | 1 + src/gallia/analyzer/operator.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py index 5ef5d9fc9..9bddf17b4 100644 --- a/src/gallia/analyzer/analyzer.py +++ b/src/gallia/analyzer/analyzer.py @@ -119,6 +119,7 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: IndexingError, AttributeError, JSONDecodeError, + NotImplementedError, ) as exc: self.log("analyzing scan_service in place failed", True, exc) return False diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index 0a71e4f01..4ba7fdcc1 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -441,7 +441,8 @@ def prepare_alwd_all( if op_mode == OpMode.ISO: ref_df = self.ref_iso_df if op_mode == OpMode.VEN_SPEC: - ref_df = self.ref_ven_df[ecu_mode] + # ref_df = self.ref_ven_df[ecu_mode] + raise NotImplementedError('OpMode.VEN_SPEC not yet supported') if not self.prepare_alwd_res(): return False if not self.prepare_alwd_sess_boot(op_mode, ecu_mode): From 397dbb6f26f6c4bdecf3d6ed94854e8d3ee65248 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Mon, 11 Jul 2022 12:23:33 +0200 Subject: [PATCH 07/26] fix sum_sheet_fill_index to support identifier and subFunc --- src/gallia/analyzer/xl_generator.py | 31 +++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 253ed66e9..cd62a1c31 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -93,7 +93,10 @@ def add_sum_sheet_serv( dft_err_df = self.get_dft_err_df_from_raw(raw_df) cur_row, cur_col = self.sum_sheet_fill_origin(ScanMode.SERV) cur_row, cur_col = self.sum_sheet_fill_index( - cur_row, cur_col, entries_vec, ScanMode.SERV + cur_row, + cur_col, + raw_df[raw_df["service"].isin(entries_vec)], + ScanMode.SERV ) cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df) cur_row, cur_col = self.sum_sheet_fill_resp( @@ -195,12 +198,24 @@ def sum_sheet_fill_index( """ fill index column in summary sheet. 
""" - entries_vec = entries_vec.drop_duplicates(["subfunc", "identifier"]) + has_sub_func = False + has_id = False + if scan_mode == ScanMode.SERV: + entries_vec = entries_vec.drop_duplicates(["service"]) + has_id = True + elif scan_mode == ScanMode.IDEN: + entries_vec = entries_vec.drop_duplicates(["subfunc", "identifier"]) + if not entries_vec[entries_vec["subfunc"] != -1].empty: + has_sub_func = True + if not entries_vec[entries_vec["identifier"] != -1].empty: + has_id = True + else: + raise NotImplementedError(f'ScanMode not supported: {scan_mode}') try: for _, row in entries_vec.iterrows(): if scan_mode == ScanMode.SERV: self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( - row.identifier, self.iso_serv_code_dict + row.service, self.iso_serv_code_dict ) cur_row += 1 if scan_mode == ScanMode.IDEN: @@ -220,16 +235,12 @@ def sum_sheet_fill_index( self.worksheet.cell(cur_row, cur_col).font = Font( name=XlDesign.font_index ) - if -1 in entries_vec["subfunc"]: - # has no sub function - cur_col += 1 - else: - # has sub function and identifer - cur_col += 2 - cur_row = self.start_row except (KeyError, AttributeError) as exc: self.log("filling index of summary sheet failed", True, exc) return self.start_row, self.start_col + 1 + + cur_col += int(has_id) + int(has_sub_func) + cur_row = self.start_row return cur_row, cur_col def sum_sheet_fill_sess( From 301749dece85a93f46e794412751f9af33ef1d21 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Mon, 11 Jul 2022 16:31:32 +0200 Subject: [PATCH 08/26] fix type annotation --- src/gallia/analyzer/reporter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index 4166a0867..9c1557624 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -233,7 +233,7 @@ def set_path_prefix(self, path: str = "") -> None: self.out_path = os.path.expanduser(path + "/") def get_path( - self, suffix: int = "", ext: str = ".xlsx", rm_if_exists: bool = False + self, suffix: str = "", ext: str = ".xlsx", rm_if_exists: bool = False ) -> str: """ get path for EXCEL report file by combining path prefix, From 204e3b93bff8344254642e70c45bf8b5995ca9c1 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Thu, 14 Jul 2022 08:42:42 +0200 Subject: [PATCH 09/26] fix type annotation --- src/gallia/analyzer/categorizer.py | 37 ++++++++++++++++++++--------- src/gallia/analyzer/extractor.py | 4 +++- src/gallia/analyzer/main.py | 8 ++++--- src/gallia/analyzer/operator.py | 13 +++++++--- src/gallia/analyzer/reporter.py | 26 +++++++++++++------- src/gallia/analyzer/xl_generator.py | 17 ++++++------- 6 files changed, 68 insertions(+), 37 deletions(-) diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index e7f312481..fc75d25df 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -3,6 +3,8 @@ """ from sqlite3 import OperationalError +from typing import cast + import numpy as np import pandas as pd from gallia.analyzer.analyzer import Analyzer @@ -124,18 +126,24 @@ def categorize_iden( return pd.DataFrame() return raw_df - def check_sess_alwd(self, serv: int, sess: int, op_mode: OpMode, ecu_mode: int) -> bool: + def check_sess_alwd( + self, serv: int, sess: int, op_mode: OpMode, ecu_mode: int + ) -> bool: """ check if a certain diagnostic session is available or supported for a certain service at given analysis mode. 
""" if op_mode == OpMode.VEN_SPEC: - ref_df = self.ref_ven_df[ecu_mode] + ref_df = cast( + pd.DataFrame, self.ref_ven_df[ecu_mode] + ) # this a nested DataFrame, which yields a DataFrame per ecu_mode if op_mode == OpMode.ISO: ref_df = self.ref_iso_df if serv not in ref_df.index: return False - return sess in ref_df.loc[serv, ColNm.sess] + return sess in cast( + list[int], ref_df.loc[serv, ColNm.sess] # The session column is a list of supported session IDs + ) def check_resp_alwd(self, serv: int, resp: int) -> bool: """ @@ -149,18 +157,24 @@ def check_resp_alwd(self, serv: int, resp: int) -> bool: + self.iso_supp_err_for_all_vec.tolist() ) - def check_sbfn_alwd(self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode: int) -> bool: + def check_sbfn_alwd( + self, serv: int, sbfn: int, op_mode: OpMode, ecu_mode: int + ) -> bool: """ check if a certain sub-function is available or supported for a certain service at given analysis mode. """ if op_mode == OpMode.VEN_SPEC: - ref_df = self.ref_ven_df[ecu_mode] + ref_df = cast( + pd.DataFrame, self.ref_ven_df[ecu_mode] + ) # this a nested DataFrame, which yields a DataFrame per ecu_mode if op_mode == OpMode.ISO: ref_df = self.ref_iso_df if serv not in ref_df.index: return False - return sbfn in ref_df.loc[serv, ColNm.sbfn] + return sbfn in cast( + list[int], ref_df.loc[serv, ColNm.sbfn] # The sub-function column is a list of supported sub-functions + ) def get_fail_serv( self, @@ -273,10 +287,10 @@ def get_fail_iden( """ if op_mode == OpMode.VEN_SPEC: supp_serv_vec = self.supp_serv_ven_vec - if op_mode == OpMode.ISO: + elif op_mode == OpMode.ISO: supp_serv_vec = self.supp_serv_iso_vec else: - raise RuntimeError(f'Unsupported op_mode: {op_mode}') + raise RuntimeError(f"Unsupported op_mode: {op_mode}") cond_serv_supp = serv in supp_serv_vec cond_resp_alwd = self.check_resp_alwd(serv, resp) @@ -375,13 +389,14 @@ def get_fail_iden( if resp == UDSErrorCodes.subFunctionNotSupportedInActiveSession: return Failure.UNDOC_IDEN_B + # TODO: What is this case about? 
diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py
index 0584db948..2a64a12cb 100644
--- a/src/gallia/analyzer/extractor.py
+++ b/src/gallia/analyzer/extractor.py
@@ -180,7 +180,9 @@ def check_boot(self, run: int) -> bool:
         if boot_df.shape[0] == 0:
             return False
         boot_types_vec = np.array([0, 1])  # vendor-specific
-        boot_ok = boot_df[ColNm.boot].apply(lambda x: x in boot_types_vec).all()
+        boot_ok = bool(
+            boot_df[ColNm.boot].apply(lambda x: x in boot_types_vec).all()
+        )
         if not boot_ok:
             self.log("boot information not complete.", True)
     except (KeyError, AttributeError, OperationalError) as exc:
diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py
index 3840894ff..9db10a8da 100755
--- a/src/gallia/analyzer/main.py
+++ b/src/gallia/analyzer/main.py
@@ -114,11 +114,13 @@ def main(self, args: Namespace) -> None:

         if analyze_on:
             if categorizer_on:
-                analyzer = Categorizer(db_path, log_mode)
+                categorizer = Categorizer(db_path, log_mode)
+                an_opt = categorizer.get_op_mode(iso_on)
+                categorizer.analyze(runs_vec, an_opt)
             else:
                 analyzer = Analyzer(db_path, log_mode, debug_on)
-            an_opt = analyzer.get_op_mode(iso_on)
-            analyzer.analyze(runs_vec, an_opt)
+                an_opt = analyzer.get_op_mode(iso_on)
+                analyzer.analyze(runs_vec, an_opt)

         if t_analyze_on:
             if t_prec > 0:
diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py
index 4ba7fdcc1..440ed2da2 100644
--- a/src/gallia/analyzer/operator.py
+++ b/src/gallia/analyzer/operator.py
@@ -112,7 +112,13 @@ def get_ecu_mode(self, run: int) -> int:
         if not self.load_meta():
             return -1
         try:
-            ecu_mode = cast(int, self.run_meta_df.loc[run, ColNm.ecu_mode])
+            _ecu_mode = self.run_meta_df.loc[run, ColNm.ecu_mode]
+            if isinstance(_ecu_mode, int):
+                ecu_mode = _ecu_mode
+            else:
+                # ecu_mode must be a positive integer in the current implementation
+                # we use the dummy mode 0 if the ECU does not use ecu_modes at all
+                ecu_mode = 0
             return ecu_mode
         except (KeyError, IndexingError, AttributeError) as exc:
             self.log("getting ECU mode failed", True, exc)
@@ -441,8 +447,9 @@ def prepare_alwd_all(
         if op_mode == OpMode.ISO:
             ref_df = self.ref_iso_df
         if op_mode == OpMode.VEN_SPEC:
-            # ref_df = self.ref_ven_df[ecu_mode]
-            raise NotImplementedError('OpMode.VEN_SPEC not yet supported')
+            ref_df = cast(
+                pd.DataFrame, self.ref_ven_df[ecu_mode]
+            )  # this is a nested DataFrame, which yields a DataFrame per ecu_mode
         if not self.prepare_alwd_res():
             return False
         if not self.prepare_alwd_sess_boot(op_mode, ecu_mode):
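get_ecu_mode above now narrows the value read from the run metadata with isinstance() instead of blindly cast()ing it, so a missing or malformed entry degrades to the dummy mode 0 rather than propagating a wrong type. A short, self-contained sketch of the same narrowing, assuming a toy metadata frame in place of the real run_meta_df from the scan database:

    import pandas as pd

    # dtype=object keeps the cell values as Python ints / None.
    run_meta_df = pd.DataFrame(
        {"ecu_mode": pd.Series([1, None], index=[1, 2], dtype=object)}
    )

    def get_ecu_mode(run: int) -> int:
        value = run_meta_df.loc[run, "ecu_mode"]
        if isinstance(value, int):
            return value
        # dummy mode 0 for ECUs that do not use ECU modes at all
        return 0

    assert get_ecu_mode(1) == 1
    assert get_ecu_mode(2) == 0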
diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py
index 9c1557624..16cbe43fd 100644
--- a/src/gallia/analyzer/reporter.py
+++ b/src/gallia/analyzer/reporter.py
@@ -2,6 +2,8 @@
 gallia-analyze Reporter module
 """
 import os
+from typing import cast
+
 import numpy as np
 import pandas as pd
 from pandas.core.indexing import IndexingError
@@ -298,11 +300,14 @@ def load_sid_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool:
         sess_vec = np.array(dft_err_df.columns)
         raw_df[ColNm.combi] = list(zip(raw_df[ColNm.sess], raw_df[ColNm.resp]))
         for sess in sess_vec:
-            cond_abn |= raw_df[ColNm.combi].apply(
-                lambda x, s=sess: (x[0] == s)
-                and (x[1] != dft_err_ser[s])
-                and (x[1] != -1)
-                and (x[1] != 0)
+            cond_abn |= cast(
+                pd.Series,
+                raw_df[ColNm.combi].apply(
+                    lambda x, s=sess: (x[0] == s)
+                    and (x[1] != dft_err_ser[s])
+                    and (x[1] != -1)
+                    and (x[1] != 0)
+                ),
             )
         if ecu_mode != -1:
             cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode
@@ -341,10 +346,13 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool
         sess_vec = np.array(dft_err_df.columns)
         raw_df[ColNm.combi] = list(zip(raw_df[ColNm.sess], raw_df[ColNm.resp]))
         for sess in sess_vec:
-            cond_abn |= raw_df[ColNm.combi].apply(
-                lambda x, s=sess: (x[0] == s)
-                and (x[1] != dft_err_ser[s])
-                and (x[1] != -1)
+            cond_abn |= cast(
+                pd.Series,
+                raw_df[ColNm.combi].apply(
+                    lambda x, s=sess: (x[0] == s)
+                    and (x[1] != dft_err_ser[s])
+                    and (x[1] != -1)
+                ),
             )
         if ecu_mode != -1:
             cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode
diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py
index cd62a1c31..fba72005d 100644
--- a/src/gallia/analyzer/xl_generator.py
+++ b/src/gallia/analyzer/xl_generator.py
@@ -96,7 +96,7 @@ def add_sum_sheet_serv(
             cur_row,
             cur_col,
             raw_df[raw_df["service"].isin(entries_vec)],
-            ScanMode.SERV
+            ScanMode.SERV,
         )
         cur_row, cur_col = self.sum_sheet_fill_sess(cur_row, cur_col, dft_err_df)
         cur_row, cur_col = self.sum_sheet_fill_resp(
@@ -210,7 +210,7 @@ def sum_sheet_fill_index(
             if not entries_vec[entries_vec["identifier"] != -1].empty:
                 has_id = True
         else:
-            raise NotImplementedError(f'ScanMode not supported: {scan_mode}')
+            raise NotImplementedError(f"ScanMode not supported: {scan_mode}")
         try:
             for _, row in entries_vec.iterrows():
                 if scan_mode == ScanMode.SERV:
@@ -361,16 +361,16 @@ def add_failure_sheet(
         cur_col = self.start_col
         sess_lu_vec = self.get_sess_lu()
         for fail in fail_vec:
-            if (fail == Failure.UNDOC_SERV) or (fail == Failure.UNDOC_IDEN):
+            if fail in [Failure.UNDOC_SERV, Failure.UNDOC_IDEN]:
                 sheet_name = ShtNm.undoc
-            if (fail == Failure.MISS_SERV) or (fail == Failure.MISS_IDEN):
+            if fail in [Failure.MISS_SERV, Failure.MISS_IDEN]:
                 sheet_name = ShtNm.miss
             self.worksheet = self.workbook.create_sheet(
                 f"{sheet_name}{sheet_name_suffix}"
             )
             self.worksheet.freeze_panes = self.worksheet.cell(
                 self.start_row + 1, self.start_col
-            ).coordinate
+            ).coordinate  # type: ignore # This seems like an error in the type hints
             for sess in sess_vec:
                 self.set_cell_width(cur_col, width)
                 self.worksheet.cell(cur_row, cur_col).value = self.get_code_text(
@@ -509,10 +509,7 @@ def check_fail(self, fail: int, fail_class: Failure) -> bool:
         """
         check if given failure belongs to given failure class.
         """
-        if (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP):
-            return True
-        else:
-            return False
+        return (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP)

     def get_code_text(self, code: int, ref: Dict[int, str]) -> str:
         """
@@ -523,7 +520,7 @@ def get_code_text(self, code: int, ref: Dict[int, str]) -> str:
             txt = ref[code]
         except KeyError:
             txt = "Unknown Code"
-        if code == -1 or code == 0:
+        if code in [-1, 0]:
             code_txt = f"{txt}"
         else:
             code_txt = f"0x{int(code):02X} {txt}"
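The check_fail() simplification in the patch above relies on failure codes being grouped into numeric bands of size FAIL_CLS_CAP: two codes belong to the same class exactly when integer division by the band size yields the same quotient. A toy sketch with an assumed band size of 10 (the real constant is defined in failure.py and may differ):

    FAIL_CLS_CAP = 10  # illustrative value only

    def check_fail(fail: int, fail_class: int) -> bool:
        # same class <=> same band under integer division
        return (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP)

    assert check_fail(23, 20)      # 23 // 10 == 20 // 10 == 2
    assert not check_fail(23, 31)  # band 2 != band 3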
""" - if (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP): - return True - else: - return False + return (fail // FAIL_CLS_CAP) == (fail_class // FAIL_CLS_CAP) def get_code_text(self, code: int, ref: Dict[int, str]) -> str: """ @@ -523,7 +520,7 @@ def get_code_text(self, code: int, ref: Dict[int, str]) -> str: txt = ref[code] except KeyError: txt = "Unknown Code" - if code == -1 or code == 0: + if code in [-1, 0]: code_txt = f"{txt}" else: code_txt = f"0x{int(code):02X} {txt}" From d8bbdfdaa397481139140e1e3e0a82d4e6bc9022 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Fri, 15 Jul 2022 08:47:51 +0200 Subject: [PATCH 10/26] fix code format --- src/gallia/analyzer/categorizer.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index fc75d25df..0cce2322e 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -142,7 +142,10 @@ def check_sess_alwd( if serv not in ref_df.index: return False return sess in cast( - list[int], ref_df.loc[serv, ColNm.sess] # The session column is a list of supported session IDs + list[int], + ref_df.loc[ + serv, ColNm.sess + ], # The session column is a list of supported session IDs ) def check_resp_alwd(self, serv: int, resp: int) -> bool: @@ -173,7 +176,10 @@ def check_sbfn_alwd( if serv not in ref_df.index: return False return sbfn in cast( - list[int], ref_df.loc[serv, ColNm.sbfn] # The sub-function column is a list of supported sub-functions + list[int], + ref_df.loc[ + serv, ColNm.sbfn + ], # The sub-function column is a list of supported sub-functions ) def get_fail_serv( From 5ba602a600f8378de116019f30f6c525f8ecb98f Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Fri, 15 Jul 2022 09:28:46 +0200 Subject: [PATCH 11/26] add SPDX-License-Identifier --- src/gallia/analyzer/__init__.py | 3 +++ src/gallia/analyzer/analyzer.py | 4 ++++ src/gallia/analyzer/arg_help.py | 4 ++++ src/gallia/analyzer/categorizer.py | 4 ++++ src/gallia/analyzer/config.py | 4 ++++ src/gallia/analyzer/constants.py | 4 ++++ src/gallia/analyzer/db_handler.py | 4 ++++ src/gallia/analyzer/exceptions.py | 4 ++++ src/gallia/analyzer/extractor.py | 4 ++++ src/gallia/analyzer/failure.py | 4 ++++ src/gallia/analyzer/iso_def.py | 4 ++++ src/gallia/analyzer/json/conditions.json.license | 3 +++ src/gallia/analyzer/json/responses.json.license | 3 +++ src/gallia/analyzer/json/uds_iso_standard.json.license | 3 +++ src/gallia/analyzer/main.py | 5 ++++- src/gallia/analyzer/mode_config.py | 4 ++++ src/gallia/analyzer/name_config.py | 4 ++++ src/gallia/analyzer/naming_conventions.txt.license | 3 +++ src/gallia/analyzer/operator.py | 4 ++++ src/gallia/analyzer/reporter.py | 4 ++++ src/gallia/analyzer/time_analyzer.py | 4 ++++ src/gallia/analyzer/xl_generator.py | 4 ++++ 22 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 src/gallia/analyzer/json/conditions.json.license create mode 100644 src/gallia/analyzer/json/responses.json.license create mode 100644 src/gallia/analyzer/json/uds_iso_standard.json.license create mode 100644 src/gallia/analyzer/naming_conventions.txt.license diff --git a/src/gallia/analyzer/__init__.py b/src/gallia/analyzer/__init__.py index e69de29bb..eea379e46 100644 --- a/src/gallia/analyzer/__init__.py +++ b/src/gallia/analyzer/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/src/gallia/analyzer/analyzer.py 
b/src/gallia/analyzer/analyzer.py index 9bddf17b4..4e90e8afb 100644 --- a/src/gallia/analyzer/analyzer.py +++ b/src/gallia/analyzer/analyzer.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Analyzer module """ diff --git a/src/gallia/analyzer/arg_help.py b/src/gallia/analyzer/arg_help.py index 848b18325..c8a4ca76c 100644 --- a/src/gallia/analyzer/arg_help.py +++ b/src/gallia/analyzer/arg_help.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze module for argument help texts """ diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index 0cce2322e..cccfcf4da 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Categorizer module """ diff --git a/src/gallia/analyzer/config.py b/src/gallia/analyzer/config.py index 1398ed05e..d162a3536 100644 --- a/src/gallia/analyzer/config.py +++ b/src/gallia/analyzer/config.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Config module """ diff --git a/src/gallia/analyzer/constants.py b/src/gallia/analyzer/constants.py index 1f5aadc53..2f896f315 100644 --- a/src/gallia/analyzer/constants.py +++ b/src/gallia/analyzer/constants.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Const module """ diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py index 4bba207f8..cf8577d8b 100644 --- a/src/gallia/analyzer/db_handler.py +++ b/src/gallia/analyzer/db_handler.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Database Handler module """ diff --git a/src/gallia/analyzer/exceptions.py b/src/gallia/analyzer/exceptions.py index 8d767a237..6b5d29b55 100644 --- a/src/gallia/analyzer/exceptions.py +++ b/src/gallia/analyzer/exceptions.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Exceptions module """ diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index 2a64a12cb..b3b9bf5fc 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Extractor module """ diff --git a/src/gallia/analyzer/failure.py b/src/gallia/analyzer/failure.py index fb5f46a1d..b378fac6e 100644 --- a/src/gallia/analyzer/failure.py +++ b/src/gallia/analyzer/failure.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Failure module """ diff --git a/src/gallia/analyzer/iso_def.py b/src/gallia/analyzer/iso_def.py index cb661cfd9..72572f3c6 100644 --- a/src/gallia/analyzer/iso_def.py +++ b/src/gallia/analyzer/iso_def.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze ISO DEF module """ diff --git a/src/gallia/analyzer/json/conditions.json.license b/src/gallia/analyzer/json/conditions.json.license new file mode 100644 index 000000000..fd78c804a --- /dev/null +++ 
b/src/gallia/analyzer/json/conditions.json.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2022 AISEC Pentesting Team + +SPDX-License-Identifier: Apache-2.0 diff --git a/src/gallia/analyzer/json/responses.json.license b/src/gallia/analyzer/json/responses.json.license new file mode 100644 index 000000000..fd78c804a --- /dev/null +++ b/src/gallia/analyzer/json/responses.json.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2022 AISEC Pentesting Team + +SPDX-License-Identifier: Apache-2.0 diff --git a/src/gallia/analyzer/json/uds_iso_standard.json.license b/src/gallia/analyzer/json/uds_iso_standard.json.license new file mode 100644 index 000000000..fd78c804a --- /dev/null +++ b/src/gallia/analyzer/json/uds_iso_standard.json.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2022 AISEC Pentesting Team + +SPDX-License-Identifier: Apache-2.0 diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 9db10a8da..e7778997a 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -1,4 +1,7 @@ -#!/usr/bin/env python3 +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze main script """ diff --git a/src/gallia/analyzer/mode_config.py b/src/gallia/analyzer/mode_config.py index c1f2642e4..6e674f3a6 100644 --- a/src/gallia/analyzer/mode_config.py +++ b/src/gallia/analyzer/mode_config.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Mode Config module """ diff --git a/src/gallia/analyzer/name_config.py b/src/gallia/analyzer/name_config.py index 654124a05..f9039dfca 100644 --- a/src/gallia/analyzer/name_config.py +++ b/src/gallia/analyzer/name_config.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Name Config module """ diff --git a/src/gallia/analyzer/naming_conventions.txt.license b/src/gallia/analyzer/naming_conventions.txt.license new file mode 100644 index 000000000..fd78c804a --- /dev/null +++ b/src/gallia/analyzer/naming_conventions.txt.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2022 AISEC Pentesting Team + +SPDX-License-Identifier: Apache-2.0 diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index 440ed2da2..a4c7754c9 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Operator module """ diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index 16cbe43fd..a4702eb56 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Reporter module """ diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py index 8d45fd4dc..d770a46bc 100644 --- a/src/gallia/analyzer/time_analyzer.py +++ b/src/gallia/analyzer/time_analyzer.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze Time Analyzer module """ diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index fba72005d..6967559fd 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: AISEC Pentesting Team +# +# 
SPDX-License-Identifier: Apache-2.0 + """ gallia-analyze EXCEL Generator module """ From cd962eb2b6b864e84aae7d9f0a20ab7270c10edd Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 26 Jul 2022 08:13:34 +0200 Subject: [PATCH 12/26] Add artifacts dir as temporary fix --- src/gallia/analyzer/main.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index e7778997a..6f8897309 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -5,9 +5,15 @@ """ gallia-analyze main script """ +import os from argparse import Namespace import sys import time +from pathlib import Path +from secrets import token_urlsafe +from tempfile import gettempdir +from typing import Optional + import numpy as np from gallia.analyzer.operator import Operator from gallia.analyzer.analyzer import Analyzer @@ -30,6 +36,25 @@ class AnalyzerMain(Script): + def __init__(self): + super().__init__() + self.artifacts_dir: Path + + def prepare_artifactsdir(self, path: Optional[Path]) -> Path: + if path is None: + base = Path(gettempdir()) + p = base.joinpath( + f'{self.id}_{time.strftime("%Y%m%d-%H%M%S")}_{token_urlsafe(6)}' + ) + p.mkdir(parents=True) + return p + + if path.is_dir(): + return path + + self.logger.log_error(f"Data directory {path} is not an existing directory.") + sys.exit(1) + def add_parser(self) -> None: # Commands grp_cmd = self.parser.add_argument_group("Command") @@ -58,8 +83,17 @@ def add_parser(self) -> None: grp_param.add_argument("--output", type=str, help=ArgHelp.output, default="") grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="") grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0) + grp_param.add_argument( + "--data-dir", + default=os.environ.get("PENRUN_ARTIFACTS"), + type=Path, + help="Folder for artifacts", + ) def main(self, args: Namespace) -> None: + self.artifacts_dir = self.prepare_artifactsdir(args.data_dir) + self.logger.log_preamble(f"Storing artifacts at {self.artifacts_dir}") + args = vars(args) # Commands analyze_on = args["a"] From 42d7e80284c0c611da38d50fed0d9a92b9447c20 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 26 Jul 2022 09:11:08 +0200 Subject: [PATCH 13/26] use artifacts dir to store results --- src/gallia/analyzer/analyzer.py | 23 +++++----- src/gallia/analyzer/categorizer.py | 8 ++-- src/gallia/analyzer/main.py | 28 ++++++------ src/gallia/analyzer/reporter.py | 64 +++++++--------------------- src/gallia/analyzer/time_analyzer.py | 7 ++- 5 files changed, 50 insertions(+), 80 deletions(-) diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py index 4e90e8afb..6cd5fab5c 100644 --- a/src/gallia/analyzer/analyzer.py +++ b/src/gallia/analyzer/analyzer.py @@ -5,9 +5,9 @@ """ gallia-analyze Analyzer module """ -import os import json from json.decoder import JSONDecodeError +from pathlib import Path from sqlite3 import OperationalError import textwrap from typing import Tuple @@ -28,13 +28,18 @@ class Analyzer(Operator): def __init__( self, - path: str = "", + path: str, + artifacts_dir: Path, log_mode: LogMode = LogMode.STD_OUT, debug_on: bool = False, ): Operator.__init__(self, path, log_mode) self.msg_head = "[Analyzer] " self.debug_on = debug_on + self.debug_dir = artifacts_dir.joinpath("debug") + if debug_on: + self.debug_dir.mkdir() + self.artifacts_dir = artifacts_dir def analyze(self, runs_vec: np.ndarray, op_mode: OpMode = OpMode.VEN_SPEC) -> bool: """ @@ -107,11 
+112,8 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: except KeyError as exc: self.log("condition key reading failed", True, exc) if self.debug_on: - if not os.path.isdir("debug"): - os.mkdir("debug") - with open( - f"./debug/analyze_serv_{str(run)}.sql", "w", encoding="utf8" - ) as file: + path = self.debug_dir.joinpath(f"analyze_serv_{str(run)}.sql") + with path.open("w", encoding="utf8") as file: file.write(analyze_sql) self.cur.executescript(analyze_sql) self.con.commit() @@ -193,11 +195,8 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: """ analyze_sql += textwrap.dedent(drop_view_sql) + "\n" if self.debug_on: - if not os.path.isdir("debug"): - os.mkdir("debug") - with open( - f"./debug/analyze_iden_{str(run)}.sql", "w", encoding="utf8" - ) as file: + path = self.debug_dir.joinpath(f"analyze_iden_{str(run)}.sql") + with path.open("w", encoding="utf8") as file: file.write(analyze_sql) self.cur.executescript(analyze_sql) self.con.commit() diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index cccfcf4da..ee90da5f5 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -5,7 +5,7 @@ """ gallia-analyze Categorizer module """ - +from pathlib import Path from sqlite3 import OperationalError from typing import cast @@ -26,8 +26,10 @@ class Categorizer(Analyzer): Inherited from Analyzer. """ - def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): - Analyzer.__init__(self, path, log_mode) + def __init__( + self, path: str, artifacts_dir: Path, log_mode: LogMode = LogMode.STD_OUT + ): + Analyzer.__init__(self, path, artifacts_dir, log_mode) self.msg_head = "[Categorizer] " def analyze_serv(self, run: int, op_mode: OpMode) -> bool: diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 6f8897309..02c1dddc7 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -36,7 +36,7 @@ class AnalyzerMain(Script): - def __init__(self): + def __init__(self) -> None: super().__init__() self.artifacts_dir: Path @@ -80,7 +80,6 @@ def add_parser(self) -> None: grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1) grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0) grp_param.add_argument("--to", type=int, help=ArgHelp.last, default=0) - grp_param.add_argument("--output", type=str, help=ArgHelp.output, default="") grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="") grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0) grp_param.add_argument( @@ -117,7 +116,6 @@ def main(self, args: Namespace) -> None: db_path = args["source"] run_start = args["from"] run_end = args["to"] + 1 - file_path = args["output"] t_prec = args["precision"] if run_end <= run_start: @@ -151,42 +149,44 @@ def main(self, args: Namespace) -> None: if analyze_on: if categorizer_on: - categorizer = Categorizer(db_path, log_mode) + categorizer = Categorizer(db_path, self.artifacts_dir, log_mode) an_opt = categorizer.get_op_mode(iso_on) categorizer.analyze(runs_vec, an_opt) else: - analyzer = Analyzer(db_path, log_mode, debug_on) + analyzer = Analyzer(db_path, self.artifacts_dir, log_mode, debug_on) an_opt = analyzer.get_op_mode(iso_on) analyzer.analyze(runs_vec, an_opt) if t_analyze_on: if t_prec > 0: - time_analyzer = TimeAnalyzer(db_path, t_prec, log_mode) + time_analyzer = TimeAnalyzer( + db_path, self.artifacts_dir, t_prec, log_mode + ) else: - time_analyzer = TimeAnalyzer(db_path, 
log_mode=log_mode) + time_analyzer = TimeAnalyzer( + db_path, self.artifacts_dir, log_mode=log_mode + ) time_analyzer.extract_tra(runs_vec) time_analyzer.hist_tra(runs_vec) time_analyzer.plot_tra(runs_vec) if report_on or aio_service_on or aio_identifier_on: - reporter = Reporter(db_path, log_mode) + reporter = Reporter(db_path, self.artifacts_dir, log_mode) if report_on: - reporter.report_xl(runs_vec, show_possible_on, file_path) + reporter.report_xl(runs_vec, show_possible_on) if aio_service_on: - reporter.consolidate_xl_serv(file_path, show_possible_on) + reporter.consolidate_xl_serv(show_possible_on) if aio_identifier_on: if all_services_on: - reporter.iterate_all(file_path, show_possible_on) + reporter.iterate_all(show_possible_on) else: if service_id == -1: print("Please input Service ID with --sid option.") else: - reporter.consolidate_xl_iden( - service_id, file_path, show_possible_on - ) + reporter.consolidate_xl_iden(service_id, show_possible_on) print( f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}" diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index a4702eb56..8b086993b 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -6,6 +6,7 @@ gallia-analyze Reporter module """ import os +from pathlib import Path from typing import cast import numpy as np @@ -24,24 +25,26 @@ class Reporter(Operator): Reporter class for generating EXCEL report and visualizing data with graphs and data frames. """ - def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): + def __init__( + self, path: str, artifacts_dir: Path, log_mode: LogMode = LogMode.STD_OUT + ): Operator.__init__(self, path, log_mode) self.msg_head = "[Reporter] " - self.out_path = f"./reports_{os.path.basename(path)}/" + self.artifacts_dir = artifacts_dir self.abn_serv_vec = np.array([]) self.abn_iden_vec = np.array([]) self.xl_ext = ".xlsx" - def iterate_all(self, out_path: str, show_psb: bool = False) -> bool: + def iterate_all(self, show_psb: bool = False) -> bool: """ consolidate all scan_identifier runs for all services by identifier respectively into EXCEL files. """ for serv in self.iso_serv_by_iden_vec: - if not self.consolidate_xl_iden(serv, out_path, show_psb): + if not self.consolidate_xl_iden(serv, show_psb): continue return True - def consolidate_xl_serv(self, out_path: str, show_psb: bool = False) -> bool: + def consolidate_xl_serv(self, show_psb: bool = False) -> bool: """ consolidate all scan_service runs sorted by ECU mode into one EXCEL file. 
""" @@ -50,7 +53,6 @@ def consolidate_xl_serv(self, out_path: str, show_psb: bool = False) -> bool: self.load_ven_sess() self.load_ven_lu() self.log(f"consolidating scan_service by ECU mode from {self.db_path} ...") - self.set_path_prefix(out_path) xl_generator = ExcelGenerator(self.db_path, self.log_mode) xl_is_empty = True for ecu_mode in np.arange(self.num_modes): @@ -77,7 +79,6 @@ def consolidate_xl_serv(self, out_path: str, show_psb: bool = False) -> bool: self.log(f"nothing to report for ECU mode {ecu_mode}.") if xl_is_empty: return False - self.set_path_prefix(out_path) out_path = self.get_path( "all_services_by_ecu_mode", self.xl_ext, rm_if_exists=True ) @@ -85,9 +86,7 @@ def consolidate_xl_serv(self, out_path: str, show_psb: bool = False) -> bool: return False return True - def consolidate_xl_iden( - self, serv: int, out_path: str, show_psb: bool = False - ) -> bool: + def consolidate_xl_iden(self, serv: int, show_psb: bool = False) -> bool: """ consolidate all scan_identifier runs sorted by ECU mode for a certain given service into one EXCEL file. @@ -102,7 +101,6 @@ def consolidate_xl_iden( self.log( f"consolidating for Service ID 0x{serv:02X} {self.iso_serv_code_dict[serv]} from {self.db_path} ..." ) - self.set_path_prefix(out_path) xl_generator = ExcelGenerator(self.db_path, self.log_mode) xl_is_empty = True if self.num_modes == 0: @@ -137,7 +135,6 @@ def consolidate_xl_iden( if xl_is_empty: self.log(f"nothing to report for Service ID 0x{serv:02X}") return False - self.set_path_prefix(out_path) out_path = self.get_path( f"0x{serv:02X}_{self.iso_serv_code_dict[serv]}", self.xl_ext, @@ -151,7 +148,6 @@ def report_xl( self, runs_vec: np.ndarray, show_psb: bool = False, - out_path: str = "", ) -> bool: """ generate EXCEL report for given input runs. @@ -160,7 +156,6 @@ def report_xl( return False self.load_ven_sess() self.load_ven_lu() - self.set_path_prefix(out_path) for run in runs_vec: self.report_xl_each_run(run, show_psb) return True @@ -221,47 +216,18 @@ def report_xl_iden(self, run: int, show_psb: bool = False) -> bool: return False return True - def set_path_prefix(self, path: str = "") -> None: - """ - set path prefix for EXCEL report file to save. - """ - if path == "": - self.out_path = os.path.expanduser( - f"./reports_{os.path.basename(self.db_path)}/" - ) - elif path == ".": - self.out_path = os.path.expanduser("./") - elif path == "..": - self.out_path = os.path.expanduser("../") - elif path[-1] == "/": - self.out_path = os.path.expanduser(path) - else: - self.out_path = os.path.expanduser(path + "/") - def get_path( self, suffix: str = "", ext: str = ".xlsx", rm_if_exists: bool = False ) -> str: """ get path for EXCEL report file by combining path prefix, - run number and EXCEL extention. + run number and EXCEL extension. 
""" - try: - dir_name = os.path.dirname(self.out_path) - file_name = os.path.basename(self.out_path) - if dir_name == "": - dir_name = "./" - out_path = os.path.join(dir_name, file_name + str(suffix) + ext) - if os.path.isdir(dir_name): - if os.path.isfile(out_path) and rm_if_exists: - os.remove(out_path) - self.log(f"existing file removed from {out_path}") - else: - os.mkdir(dir_name) - self.log(f"directory created at {dir_name}") - except (OSError) as exc: - self.log("getting path failed", True, exc) - return f"./reports/run{str(suffix)}{ext}" - return out_path + out_path = self.artifacts_dir.joinpath(f"{suffix}{ext}") + if out_path.is_file() and rm_if_exists: + os.remove(out_path) + self.log(f"existing file removed from {out_path}") + return str(out_path) def get_entries_oi(self, scan_mode: ScanMode, show_psb: bool = False) -> np.ndarray: """ diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py index d770a46bc..94e2061a2 100644 --- a/src/gallia/analyzer/time_analyzer.py +++ b/src/gallia/analyzer/time_analyzer.py @@ -6,6 +6,8 @@ gallia-analyze Time Analyzer module """ import json +from pathlib import Path + import numpy as np import pandas as pd from pandas.core.indexing import IndexingError @@ -24,11 +26,12 @@ class TimeAnalyzer(Reporter): def __init__( self, - path: str = "", + path: str, + artifacts_dir: Path, t_prec: int = DFT_T_PREC, log_mode: LogMode = LogMode.STD_OUT, ): - Reporter.__init__(self, path, log_mode) + Reporter.__init__(self, path, artifacts_dir, log_mode) self.msg_head = "[TimeAnalyzer] " self.t_prec = t_prec self.jpg_ext = ".jpg" From 814c80f4f34b4f9d9eecb60f7715a17c63caea6d Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Tue, 26 Jul 2022 09:11:44 +0200 Subject: [PATCH 14/26] update poetry.lock --- poetry.lock | 715 ++++++++++++++++++++++++++++++++++++++++++++----- pyproject.toml | 1 - 2 files changed, 650 insertions(+), 66 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3b7e0be15..8a9399c3f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "aiofiles" @@ -367,6 +367,69 @@ files = [ construct = "2.10.68" typing-extensions = ">=4.6.0" +[[package]] +name = "contourpy" +version = "1.2.1" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040"}, + {file = "contourpy-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd"}, + {file = "contourpy-1.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c75507d0a55378240f781599c30e7776674dbaf883a46d1c90f37e563453480"}, + {file = "contourpy-1.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11959f0ce4a6f7b76ec578576a0b61a28bdc0696194b6347ba3f1c53827178b9"}, + {file = "contourpy-1.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb3315a8a236ee19b6df481fc5f997436e8ade24a9f03dfdc6bd490fea20c6da"}, + {file = "contourpy-1.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39f3ecaf76cd98e802f094e0d4fbc6dc9c45a8d0c4d185f0f6c2234e14e5f75b"}, + {file = "contourpy-1.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:94b34f32646ca0414237168d68a9157cb3889f06b096612afdd296003fdd32fd"}, + {file = "contourpy-1.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:457499c79fa84593f22454bbd27670227874cd2ff5d6c84e60575c8b50a69619"}, + {file = "contourpy-1.2.1-cp310-cp310-win32.whl", hash = "sha256:ac58bdee53cbeba2ecad824fa8159493f0bf3b8ea4e93feb06c9a465d6c87da8"}, + {file = "contourpy-1.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9cffe0f850e89d7c0012a1fb8730f75edd4320a0a731ed0c183904fe6ecfc3a9"}, + {file = "contourpy-1.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6022cecf8f44e36af10bd9118ca71f371078b4c168b6e0fab43d4a889985dbb5"}, + {file = "contourpy-1.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef5adb9a3b1d0c645ff694f9bca7702ec2c70f4d734f9922ea34de02294fdf72"}, + {file = "contourpy-1.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6150ffa5c767bc6332df27157d95442c379b7dce3a38dff89c0f39b63275696f"}, + {file = "contourpy-1.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c863140fafc615c14a4bf4efd0f4425c02230eb8ef02784c9a156461e62c965"}, + {file = "contourpy-1.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:00e5388f71c1a0610e6fe56b5c44ab7ba14165cdd6d695429c5cd94021e390b2"}, + {file = "contourpy-1.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4492d82b3bc7fbb7e3610747b159869468079fe149ec5c4d771fa1f614a14df"}, + {file = "contourpy-1.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49e70d111fee47284d9dd867c9bb9a7058a3c617274900780c43e38d90fe1205"}, + {file = "contourpy-1.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b59c0ffceff8d4d3996a45f2bb6f4c207f94684a96bf3d9728dbb77428dd8cb8"}, + {file = "contourpy-1.2.1-cp311-cp311-win32.whl", hash = "sha256:7b4182299f251060996af5249c286bae9361fa8c6a9cda5efc29fe8bfd6062ec"}, + {file = "contourpy-1.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2855c8b0b55958265e8b5888d6a615ba02883b225f2227461aa9127c578a4922"}, + {file = "contourpy-1.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:62828cada4a2b850dbef89c81f5a33741898b305db244904de418cc957ff05dc"}, + 
{file = "contourpy-1.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:309be79c0a354afff9ff7da4aaed7c3257e77edf6c1b448a779329431ee79d7e"}, + {file = "contourpy-1.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e785e0f2ef0d567099b9ff92cbfb958d71c2d5b9259981cd9bee81bd194c9a4"}, + {file = "contourpy-1.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cac0a8f71a041aa587410424ad46dfa6a11f6149ceb219ce7dd48f6b02b87a7"}, + {file = "contourpy-1.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af3f4485884750dddd9c25cb7e3915d83c2db92488b38ccb77dd594eac84c4a0"}, + {file = "contourpy-1.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ce6889abac9a42afd07a562c2d6d4b2b7134f83f18571d859b25624a331c90b"}, + {file = "contourpy-1.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a1eea9aecf761c661d096d39ed9026574de8adb2ae1c5bd7b33558af884fb2ce"}, + {file = "contourpy-1.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:187fa1d4c6acc06adb0fae5544c59898ad781409e61a926ac7e84b8f276dcef4"}, + {file = "contourpy-1.2.1-cp312-cp312-win32.whl", hash = "sha256:c2528d60e398c7c4c799d56f907664673a807635b857df18f7ae64d3e6ce2d9f"}, + {file = "contourpy-1.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:1a07fc092a4088ee952ddae19a2b2a85757b923217b7eed584fdf25f53a6e7ce"}, + {file = "contourpy-1.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bb6834cbd983b19f06908b45bfc2dad6ac9479ae04abe923a275b5f48f1a186b"}, + {file = "contourpy-1.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1d59e739ab0e3520e62a26c60707cc3ab0365d2f8fecea74bfe4de72dc56388f"}, + {file = "contourpy-1.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd3db01f59fdcbce5b22afad19e390260d6d0222f35a1023d9adc5690a889364"}, + {file = "contourpy-1.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a12a813949e5066148712a0626895c26b2578874e4cc63160bb007e6df3436fe"}, + {file = "contourpy-1.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe0ccca550bb8e5abc22f530ec0466136379c01321fd94f30a22231e8a48d985"}, + {file = "contourpy-1.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445"}, + {file = "contourpy-1.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f32c38afb74bd98ce26de7cc74a67b40afb7b05aae7b42924ea990d51e4dac02"}, + {file = "contourpy-1.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d31a63bc6e6d87f77d71e1abbd7387ab817a66733734883d1fc0021ed9bfa083"}, + {file = "contourpy-1.2.1-cp39-cp39-win32.whl", hash = "sha256:ddcb8581510311e13421b1f544403c16e901c4e8f09083c881fab2be80ee31ba"}, + {file = "contourpy-1.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:10a37ae557aabf2509c79715cd20b62e4c7c28b8cd62dd7d99e5ed3ce28c3fd9"}, + {file = "contourpy-1.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a31f94983fecbac95e58388210427d68cd30fe8a36927980fab9c20062645609"}, + {file = "contourpy-1.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef2b055471c0eb466033760a521efb9d8a32b99ab907fc8358481a1dd29e3bd3"}, + {file = "contourpy-1.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b33d2bc4f69caedcd0a275329eb2198f560b325605810895627be5d4b876bf7f"}, + {file = "contourpy-1.2.1.tar.gz", hash = "sha256:4d8908b3bee1c889e547867ca4cdc54e5ab6be6d3e078556814a22457f49423c"}, +] + +[package.dependencies] 
+numpy = ">=1.20" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.8.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] + [[package]] name = "coverage" version = "7.6.0" @@ -431,6 +494,21 @@ files = [ [package.extras] toml = ["tomli"] +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + [[package]] name = "docstring-to-markdown" version = "0.15" @@ -453,6 +531,17 @@ files = [ {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, ] +[[package]] +name = "et-xmlfile" +version = "1.1.0" +description = "An implementation of lxml.xmlfile for the standard library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"}, + {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"}, +] + [[package]] name = "exitcode" version = "0.1.0" @@ -464,6 +553,71 @@ files = [ {file = "exitcode-0.1.0.tar.gz", hash = "sha256:bc9ee1420068e9654b9a5d74b60bf52cf749097c485c8106717bcf4ba25fde07"}, ] +[[package]] +name = "fonttools" +version = "4.53.1" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, + {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, + {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b96cd370a61f4d083c9c0053bf634279b094308d52fdc2dd9a22d8372fdd590d"}, + {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c7c5aa18dd3b17995898b4a9b5929d69ef6ae2af5b96d585ff4005033d82f0"}, + {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e013aae589c1c12505da64a7d8d023e584987e51e62006e1bb30d72f26522c41"}, + {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9efd176f874cb6402e607e4cc9b4a9cd584d82fc34a4b0c811970b32ba62501f"}, + {file = "fonttools-4.53.1-cp310-cp310-win32.whl", hash = "sha256:c8696544c964500aa9439efb6761947393b70b17ef4e82d73277413f291260a4"}, + {file = "fonttools-4.53.1-cp310-cp310-win_amd64.whl", hash = "sha256:8959a59de5af6d2bec27489e98ef25a397cfa1774b375d5787509c06659b3671"}, + {file = "fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1"}, + {file = "fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923"}, + {file = 
"fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719"}, + {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3"}, + {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb"}, + {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2"}, + {file = "fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88"}, + {file = "fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02"}, + {file = "fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58"}, + {file = "fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8"}, + {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60"}, + {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f"}, + {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2"}, + {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f"}, + {file = "fonttools-4.53.1-cp312-cp312-win32.whl", hash = "sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670"}, + {file = "fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab"}, + {file = "fonttools-4.53.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c818c058404eb2bba05e728d38049438afd649e3c409796723dfc17cd3f08749"}, + {file = "fonttools-4.53.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:651390c3b26b0c7d1f4407cad281ee7a5a85a31a110cbac5269de72a51551ba2"}, + {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54f1bba2f655924c1138bbc7fa91abd61f45c68bd65ab5ed985942712864bbb"}, + {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cd19cf4fe0595ebdd1d4915882b9440c3a6d30b008f3cc7587c1da7b95be5f"}, + {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2af40ae9cdcb204fc1d8f26b190aa16534fcd4f0df756268df674a270eab575d"}, + {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:35250099b0cfb32d799fb5d6c651220a642fe2e3c7d2560490e6f1d3f9ae9169"}, + {file = "fonttools-4.53.1-cp38-cp38-win32.whl", hash = "sha256:f08df60fbd8d289152079a65da4e66a447efc1d5d5a4d3f299cdd39e3b2e4a7d"}, + {file = "fonttools-4.53.1-cp38-cp38-win_amd64.whl", hash = "sha256:7b6b35e52ddc8fb0db562133894e6ef5b4e54e1283dff606fda3eed938c36fc8"}, + {file = "fonttools-4.53.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75a157d8d26c06e64ace9df037ee93a4938a4606a38cb7ffaf6635e60e253b7a"}, + {file = 
"fonttools-4.53.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4824c198f714ab5559c5be10fd1adf876712aa7989882a4ec887bf1ef3e00e31"}, + {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:becc5d7cb89c7b7afa8321b6bb3dbee0eec2b57855c90b3e9bf5fb816671fa7c"}, + {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ec3fb43befb54be490147b4a922b5314e16372a643004f182babee9f9c3407"}, + {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:73379d3ffdeecb376640cd8ed03e9d2d0e568c9d1a4e9b16504a834ebadc2dfb"}, + {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122"}, + {file = "fonttools-4.53.1-cp39-cp39-win32.whl", hash = "sha256:aae7bd54187e8bf7fd69f8ab87b2885253d3575163ad4d669a262fe97f0136cb"}, + {file = "fonttools-4.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:e5b708073ea3d684235648786f5f6153a48dc8762cdfe5563c57e80787c29fbb"}, + {file = "fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d"}, + {file = "fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + [[package]] name = "h11" version = "0.14.0" @@ -589,6 +743,119 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = 
"kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = 
"kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + [[package]] name = "license-expression" version = "30.3.0" @@ -700,6 +967,58 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "matplotlib" +version = "3.9.1" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.9.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7ccd6270066feb9a9d8e0705aa027f1ff39f354c72a87efe8fa07632f30fc6bb"}, + {file = "matplotlib-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:591d3a88903a30a6d23b040c1e44d1afdd0d778758d07110eb7596f811f31842"}, + {file = "matplotlib-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2a59ff4b83d33bca3b5ec58203cc65985367812cb8c257f3e101632be86d92"}, + {file = "matplotlib-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fc001516ffcf1a221beb51198b194d9230199d6842c540108e4ce109ac05cc0"}, + {file = "matplotlib-3.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:83c6a792f1465d174c86d06f3ae85a8fe36e6f5964633ae8106312ec0921fdf5"}, + {file = "matplotlib-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:421851f4f57350bcf0811edd754a708d2275533e84f52f6760b740766c6747a7"}, + {file = "matplotlib-3.9.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b3fce58971b465e01b5c538f9d44915640c20ec5ff31346e963c9e1cd66fa812"}, + {file = "matplotlib-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a973c53ad0668c53e0ed76b27d2eeeae8799836fd0d0caaa4ecc66bf4e6676c0"}, + {file = "matplotlib-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cd5acf8f3ef43f7532c2f230249720f5dc5dd40ecafaf1c60ac8200d46d7eb"}, + {file = "matplotlib-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab38a4f3772523179b2f772103d8030215b318fef6360cb40558f585bf3d017f"}, + {file = "matplotlib-3.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2315837485ca6188a4b632c5199900e28d33b481eb083663f6a44cfc8987ded3"}, + {file = "matplotlib-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a0c977c5c382f6696caf0bd277ef4f936da7e2aa202ff66cad5f0ac1428ee15b"}, + {file = "matplotlib-3.9.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:565d572efea2b94f264dd86ef27919515aa6d629252a169b42ce5f570db7f37b"}, + {file = "matplotlib-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d397fd8ccc64af2ec0af1f0efc3bacd745ebfb9d507f3f552e8adb689ed730a"}, + {file = 
"matplotlib-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26040c8f5121cd1ad712abffcd4b5222a8aec3a0fe40bc8542c94331deb8780d"}, + {file = "matplotlib-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12cb1837cffaac087ad6b44399d5e22b78c729de3cdae4629e252067b705e2b"}, + {file = "matplotlib-3.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0e835c6988edc3d2d08794f73c323cc62483e13df0194719ecb0723b564e0b5c"}, + {file = "matplotlib-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:44a21d922f78ce40435cb35b43dd7d573cf2a30138d5c4b709d19f00e3907fd7"}, + {file = "matplotlib-3.9.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0c584210c755ae921283d21d01f03a49ef46d1afa184134dd0f95b0202ee6f03"}, + {file = "matplotlib-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11fed08f34fa682c2b792942f8902e7aefeed400da71f9e5816bea40a7ce28fe"}, + {file = "matplotlib-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0000354e32efcfd86bda75729716b92f5c2edd5b947200be9881f0a671565c33"}, + {file = "matplotlib-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db17fea0ae3aceb8e9ac69c7e3051bae0b3d083bfec932240f9bf5d0197a049"}, + {file = "matplotlib-3.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:208cbce658b72bf6a8e675058fbbf59f67814057ae78165d8a2f87c45b48d0ff"}, + {file = "matplotlib-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:dc23f48ab630474264276be156d0d7710ac6c5a09648ccdf49fef9200d8cbe80"}, + {file = "matplotlib-3.9.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3fda72d4d472e2ccd1be0e9ccb6bf0d2eaf635e7f8f51d737ed7e465ac020cb3"}, + {file = "matplotlib-3.9.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:84b3ba8429935a444f1fdc80ed930babbe06725bcf09fbeb5c8757a2cd74af04"}, + {file = "matplotlib-3.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b918770bf3e07845408716e5bbda17eadfc3fcbd9307dc67f37d6cf834bb3d98"}, + {file = "matplotlib-3.9.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f1f2e5d29e9435c97ad4c36fb6668e89aee13d48c75893e25cef064675038ac9"}, + {file = "matplotlib-3.9.1.tar.gz", hash = "sha256:de06b19b8db95dd33d0dc17c926c7c9ebed9f572074b6fac4f65068a6814d010"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] + [[package]] name = "mdit-py-plugins" version = "0.4.1" @@ -803,6 +1122,7 @@ files = [ {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"}, {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"}, {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"}, + {file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"}, {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, ] @@ -860,43 +1180,43 @@ yaml = ["pyyaml"] [[package]] name = "mypy" -version = "1.10.1" +version = "1.11.0" description = "Optional static typing for Python" optional 
= false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file 
= "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, + {file = "mypy-1.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3824187c99b893f90c845bab405a585d1ced4ff55421fdf5c84cb7710995229"}, + {file = "mypy-1.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96f8dbc2c85046c81bcddc246232d500ad729cb720da4e20fce3b542cab91287"}, + {file = "mypy-1.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a5d8d8dd8613a3e2be3eae829ee891b6b2de6302f24766ff06cb2875f5be9c6"}, + {file = "mypy-1.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:72596a79bbfb195fd41405cffa18210af3811beb91ff946dbcb7368240eed6be"}, + {file = "mypy-1.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:35ce88b8ed3a759634cb4eb646d002c4cef0a38f20565ee82b5023558eb90c00"}, + {file = "mypy-1.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:98790025861cb2c3db8c2f5ad10fc8c336ed2a55f4daf1b8b3f877826b6ff2eb"}, + {file = "mypy-1.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25bcfa75b9b5a5f8d67147a54ea97ed63a653995a82798221cca2a315c0238c1"}, + {file = "mypy-1.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bea2a0e71c2a375c9fa0ede3d98324214d67b3cbbfcbd55ac8f750f85a414e3"}, + {file = "mypy-1.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2b3d36baac48e40e3064d2901f2fbd2a2d6880ec6ce6358825c85031d7c0d4d"}, + {file = "mypy-1.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:d8e2e43977f0e09f149ea69fd0556623919f816764e26d74da0c8a7b48f3e18a"}, + {file = "mypy-1.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1d44c1e44a8be986b54b09f15f2c1a66368eb43861b4e82573026e04c48a9e20"}, + {file = "mypy-1.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cea3d0fb69637944dd321f41bc896e11d0fb0b0aa531d887a6da70f6e7473aba"}, + {file = "mypy-1.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a83ec98ae12d51c252be61521aa5731f5512231d0b738b4cb2498344f0b840cd"}, + {file = "mypy-1.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7b73a856522417beb78e0fb6d33ef89474e7a622db2653bc1285af36e2e3e3d"}, + {file = "mypy-1.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:f2268d9fcd9686b61ab64f077be7ffbc6fbcdfb4103e5dd0cc5eaab53a8886c2"}, + {file = "mypy-1.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:940bfff7283c267ae6522ef926a7887305945f716a7704d3344d6d07f02df850"}, + {file = "mypy-1.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:14f9294528b5f5cf96c721f231c9f5b2733164e02c1c018ed1a0eff8a18005ac"}, + {file = "mypy-1.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7b54c27783991399046837df5c7c9d325d921394757d09dbcbf96aee4649fe9"}, + {file = "mypy-1.11.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:65f190a6349dec29c8d1a1cd4aa71284177aee5949e0502e6379b42873eddbe7"}, + {file = "mypy-1.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbe286303241fea8c2ea5466f6e0e6a046a135a7e7609167b07fd4e7baf151bf"}, + {file = "mypy-1.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:104e9c1620c2675420abd1f6c44bab7dd33cc85aea751c985006e83dcd001095"}, + {file = 
"mypy-1.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f006e955718ecd8d159cee9932b64fba8f86ee6f7728ca3ac66c3a54b0062abe"}, + {file = "mypy-1.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:becc9111ca572b04e7e77131bc708480cc88a911adf3d0239f974c034b78085c"}, + {file = "mypy-1.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6801319fe76c3f3a3833f2b5af7bd2c17bb93c00026a2a1b924e6762f5b19e13"}, + {file = "mypy-1.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:c1a184c64521dc549324ec6ef7cbaa6b351912be9cb5edb803c2808a0d7e85ac"}, + {file = "mypy-1.11.0-py3-none-any.whl", hash = "sha256:56913ec8c7638b0091ef4da6fcc9136896914a9d60d54670a75880c3e5b99ace"}, + {file = "mypy-1.11.0.tar.gz", hash = "sha256:93743608c7348772fdc717af4aeee1997293a1ad04bc0ea6efa15bf65385c538"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -941,6 +1261,81 @@ rtd = ["ipython", "sphinx (>=7)", "sphinx-autodoc2 (>=0.5.0,<0.6.0)", "sphinx-bo testing = ["beautifulsoup4", "coverage[toml]", "defusedxml", "pytest (>=8,<9)", "pytest-cov", "pytest-param-files (>=0.6.0,<0.7.0)", "pytest-regressions", "sphinx-pytest"] testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,<0.7.0)"] +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "openpyxl" +version = "3.1.5" +description = "A Python library to read/write Excel 2010 xlsx/xlsm files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "openpyxl-3.1.5-py2.py3-none-any.whl", hash = 
"sha256:5282c12b107bffeef825f4617dc029afaf41d0ea60823bbb665ef3079dc79de2"}, + {file = "openpyxl-3.1.5.tar.gz", hash = "sha256:cf0e3cf56142039133628b5acffe8ef0c12bc902d2aadd3e0fe5878dc08d1050"}, +] + +[package.dependencies] +et-xmlfile = "*" + +[[package]] +name = "openpyxl-stubs" +version = "0.1.25" +description = "Type stubs for openpyxl" +optional = false +python-versions = "*" +files = [ + {file = "openpyxl-stubs-0.1.25.tar.gz", hash = "sha256:108b112df072f7645ca356eacdd5730b1bd986c67ae33366a4a13c6879c369e7"}, + {file = "openpyxl_stubs-0.1.25-py3-none-any.whl", hash = "sha256:db29f7804993b4a46b155fc4be45314c14538cb475b00591d8096e5af486abf1"}, +] + +[package.dependencies] +mypy = ">=0.720" +openpyxl = ">=3.0.0" +typing-extensions = ">=3.7.4" + [[package]] name = "packaging" version = "24.1" @@ -952,6 +1347,50 @@ files = [ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] +[[package]] +name = "pandas" +version = "1.5.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.2", markers = "python_version >= \"3.11\""} +python-dateutil = ">=2.8.1" +pytz = ">=2020.1" + +[package.extras] +test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] + [[package]] name = "parso" version = "0.8.4" @@ -967,6 +1406,103 @@ files = [ qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["docopt", "pytest"] +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = 
"sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] 
+ +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" version = "4.2.2" @@ -1251,22 +1787,36 @@ rope = ">=0.21.0" dev = ["build", "pytest", "twine"] test = ["flake8", "pytest", "pytest-cov"] +[[package]] +name = "pyparsing" +version = "3.1.2" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "pytest" -version = "8.2.2" +version = "8.3.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, - {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, + {file = "pytest-8.3.1-py3-none-any.whl", hash = "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c"}, + {file = "pytest-8.3.1.tar.gz", hash = "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6"}, ] [package.dependencies] colorama = {version = "*", markers = "sys_platform == \"win32\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.5,<2.0" +pluggy = ">=1.5,<2" [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] @@ -1342,6 +1892,20 @@ serial = ["pyserial (>=3.0,<4.0)"] sontheim = ["python-can-sontheim (>=0.1.2)"] viewer = ["windows-curses"] +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + [[package]] name = "python-debian" version = "0.1.49" @@ -1426,6 +1990,17 @@ gendocs = ["pytoolconfig[doc]", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (> global = ["platformdirs (>=3.11.0)"] validation = ["pydantic (>=2.5.3)"] +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + [[package]] name = "pywin32" version = "306" @@ -1474,7 +2049,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -1571,29 +2145,40 @@ release = ["pip-tools (>=6.12.1)", "toml (>=0.10.2)", "twine (>=4.0.2)"] [[package]] name = "ruff" -version = "0.5.2" +version = "0.5.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.2-py3-none-linux_armv6l.whl", hash = "sha256:7bab8345df60f9368d5f4594bfb8b71157496b44c30ff035d1d01972e764d3be"}, - {file = "ruff-0.5.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1aa7acad382ada0189dbe76095cf0a36cd0036779607c397ffdea16517f535b1"}, - {file = "ruff-0.5.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:aec618d5a0cdba5592c60c2dee7d9c865180627f1a4a691257dea14ac1aa264d"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b62adc5ce81780ff04077e88bac0986363e4a3260ad3ef11ae9c14aa0e67ef"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dc42ebf56ede83cb080a50eba35a06e636775649a1ffd03dc986533f878702a3"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c15c6e9f88c67ffa442681365d11df38afb11059fc44238e71a9d9f1fd51de70"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d3de9a5960f72c335ef00763d861fc5005ef0644cb260ba1b5a115a102157251"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe5a968ae933e8f7627a7b2fc8893336ac2be0eb0aace762d3421f6e8f7b7f83"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a04f54a9018f75615ae52f36ea1c5515e356e5d5e214b22609ddb546baef7132"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed02fb52e3741f0738db5f93e10ae0fb5c71eb33a4f2ba87c9a2fa97462a649"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3cf8fe659f6362530435d97d738eb413e9f090e7e993f88711b0377fbdc99f60"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:237a37e673e9f3cbfff0d2243e797c4862a44c93d2f52a52021c1a1b0899f846"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2a2949ce7c1cbd8317432ada80fe32156df825b2fd611688814c8557824ef060"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:481af57c8e99da92ad168924fd82220266043c8255942a1cb87958b108ac9335"}, - {file = "ruff-0.5.2-py3-none-win32.whl", hash = "sha256:f1aea290c56d913e363066d83d3fc26848814a1fed3d72144ff9c930e8c7c718"}, - {file = "ruff-0.5.2-py3-none-win_amd64.whl", 
hash = "sha256:8532660b72b5d94d2a0a7a27ae7b9b40053662d00357bb2a6864dd7e38819084"}, - {file = "ruff-0.5.2-py3-none-win_arm64.whl", hash = "sha256:73439805c5cb68f364d826a5c5c4b6c798ded6b7ebaa4011f01ce6c94e4d5583"}, - {file = "ruff-0.5.2.tar.gz", hash = "sha256:2c0df2d2de685433794a14d8d2e240df619b748fbe3367346baa519d8e6f1ca2"}, + {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"}, + {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"}, + {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"}, + {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"}, + {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"}, + {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"}, + {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"}, + {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"}, + {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] @@ -1620,13 +2205,13 @@ files = [ [[package]] name = "sphinx" -version = "7.4.6" +version = "7.4.7" description = "Python documentation generator" optional = false 
python-versions = ">=3.9" files = [ - {file = "sphinx-7.4.6-py3-none-any.whl", hash = "sha256:915760d6188288a1e30c2cd0d9fa31b1b009bc6e6019cc0c32d16c77d20e86d9"}, - {file = "sphinx-7.4.6.tar.gz", hash = "sha256:116918d455c493fff3178edea12b4fe1c1e4894680fd81e7b7431ea21d47ca52"}, + {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, + {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, ] [package.dependencies] @@ -1705,13 +2290,13 @@ test = ["pytest"] [[package]] name = "sphinxcontrib-htmlhelp" -version = "2.0.5" +version = "2.0.6" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl", hash = "sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04"}, - {file = "sphinxcontrib_htmlhelp-2.0.5.tar.gz", hash = "sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015"}, + {file = "sphinxcontrib_htmlhelp-2.0.6-py3-none-any.whl", hash = "sha256:1b9af5a2671a61410a868fce050cab7ca393c218e6205cbc7f590136f207395c"}, + {file = "sphinxcontrib_htmlhelp-2.0.6.tar.gz", hash = "sha256:c6597da06185f0e3b4dc952777a04200611ef563882e0c244d27a15ee22afa73"}, ] [package.extras] @@ -1749,19 +2334,19 @@ test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" -version = "1.0.7" +version = "1.0.8" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_qthelp-1.0.7-py3-none-any.whl", hash = "sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182"}, - {file = "sphinxcontrib_qthelp-1.0.7.tar.gz", hash = "sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6"}, + {file = "sphinxcontrib_qthelp-1.0.8-py3-none-any.whl", hash = "sha256:323d6acc4189af76dfe94edd2a27d458902319b60fcca2aeef3b2180c106a75f"}, + {file = "sphinxcontrib_qthelp-1.0.8.tar.gz", hash = "sha256:db3f8fa10789c7a8e76d173c23364bdf0ebcd9449969a9e6a3dd31b8b7469f03"}, ] [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] standalone = ["Sphinx (>=5)"] -test = ["pytest"] +test = ["defusedxml (>=0.7.1)", "pytest"] [[package]] name = "sphinxcontrib-serializinghtml" @@ -2157,4 +2742,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.13" -content-hash = "d952c50a017dc6f278a22bab11f2c8b917cf539f8a85fba4c8a3e759ec7486a5" +content-hash = "ee12212dfe616d2d660c8b6fe01a946422e9164388e351005ee53623252ab1e4" diff --git a/pyproject.toml b/pyproject.toml index 1b1217e4f..25966d590 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,7 +71,6 @@ reuse = "^4.0" construct-typing = ">=0.5.2,<0.7.0" pytest-cov = ">=4,<6" ruff = "^0.5.0" -pandas-stubs = "^1.4.3" openpyxl-stubs = "^0.1.21" [tool.poetry.scripts] From 28fb2b12d154bd3820497ad931e85060ef11b1a2 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Fri, 29 Jul 2022 13:41:00 +0200 Subject: [PATCH 15/26] make analyzer optional --- pyproject.toml | 2 ++ src/gallia/analyzer/main.py | 27 +++++++++++++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 25966d590..3bdee71c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,8 @@ exitcode = "^0.1.0" psutil = ">=5.9.4,<7.0.0" httpx = ">=0.26,<0.28" more-itertools = "^10.3.0" +# A list of 
all of the optional dependencies, some of which are included in the +# below `extras`. They can be opted into by apps. numpy = "^1.21.4" openpyxl = "^3.0.9" pandas = "^1.3.4" diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 02c1dddc7..6ffde8e0d 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -14,14 +14,19 @@ from tempfile import gettempdir from typing import Optional -import numpy as np -from gallia.analyzer.operator import Operator -from gallia.analyzer.analyzer import Analyzer -from gallia.analyzer.extractor import Extractor -from gallia.analyzer.reporter import Reporter -from gallia.analyzer.categorizer import Categorizer -from gallia.analyzer.time_analyzer import TimeAnalyzer -from gallia.analyzer.mode_config import LogMode +try: + import numpy as np + from gallia.analyzer.operator import Operator + from gallia.analyzer.analyzer import Analyzer + from gallia.analyzer.extractor import Extractor + from gallia.analyzer.reporter import Reporter + from gallia.analyzer.categorizer import Categorizer + from gallia.analyzer.time_analyzer import TimeAnalyzer + from gallia.analyzer.mode_config import LogMode + ANALYZER_AVAILABLE = True +except ModuleNotFoundError: + ANALYZER_AVAILABLE = False + from gallia.analyzer.arg_help import ArgHelp from gallia.udscan.core import Script @@ -90,6 +95,12 @@ def add_parser(self) -> None: ) def main(self, args: Namespace) -> None: + if not ANALYZER_AVAILABLE: + self.logger.log_error( + "Please install optional dependencies to run the analyzer" + ) + sys.exit(1) + self.artifacts_dir = self.prepare_artifactsdir(args.data_dir) self.logger.log_preamble(f"Storing artifacts at {self.artifacts_dir}") From 140c685371f66354b29ab89d2c50598b094b4621 Mon Sep 17 00:00:00 2001 From: "Specht, Tobias" Date: Mon, 1 Aug 2022 09:24:37 +0200 Subject: [PATCH 16/26] use penlog for logging --- src/gallia/analyzer/analyzer.py | 66 +++++++++++++++------------ src/gallia/analyzer/categorizer.py | 32 +++++++------ src/gallia/analyzer/db_handler.py | 51 +++++---------------- src/gallia/analyzer/extractor.py | 16 +++---- src/gallia/analyzer/main.py | 7 +-- src/gallia/analyzer/name_config.py | 1 - src/gallia/analyzer/operator.py | 68 ++++++++++++++++++---------- src/gallia/analyzer/reporter.py | 56 +++++++++++++---------- src/gallia/analyzer/time_analyzer.py | 30 +++++++----- src/gallia/analyzer/xl_generator.py | 46 +++++++++++-------- 10 files changed, 206 insertions(+), 167 deletions(-) diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py index 6cd5fab5c..9f61dd740 100644 --- a/src/gallia/analyzer/analyzer.py +++ b/src/gallia/analyzer/analyzer.py @@ -17,6 +17,7 @@ from gallia.analyzer.config import SrcPath from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode from gallia.analyzer.name_config import ColNm, KyNm, TblNm, VwNm, NEG_STR +from gallia.utils import g_repr class Analyzer(Operator): @@ -34,7 +35,6 @@ def __init__( debug_on: bool = False, ): Operator.__init__(self, path, log_mode) - self.msg_head = "[Analyzer] " self.debug_on = debug_on self.debug_dir = artifacts_dir.joinpath("debug") if debug_on: @@ -60,7 +60,7 @@ def analyze_each_run(self, run: int, op_mode: OpMode) -> bool: """ analyze certain run at a given operation mode. 
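(Aside: the guard introduced in the previous patch is the usual pattern for optional extras: attempt the heavy imports once at module load, remember the outcome in a module-level flag, and bail out with an actionable hint at the entry point instead of crashing with an ImportError. A minimal self-contained sketch of that pattern; the names and message below are illustrative stand-ins, not gallia API:

    import sys

    try:
        import numpy as np  # heavy, optional dependency
        ANALYZER_AVAILABLE = True
    except ModuleNotFoundError:
        ANALYZER_AVAILABLE = False

    def main() -> None:
        if not ANALYZER_AVAILABLE:
            # fail fast with a hint instead of a raw ImportError traceback
            print("Please install optional dependencies to run the analyzer")
            sys.exit(1)
)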
""" - self.log(f"analyzing run #{str(run)} from {self.db_path} ...") + self.logger.log_summary(f"analyzing run #{str(run)} from {self.db_path} ...") scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: if not self.reset(TblNm.serv, run): @@ -83,7 +83,7 @@ def reset(self, table_name: str, run: int) -> bool: self.cur.executescript(reset_sql) self.con.commit() except (OperationalError, FileNotFoundError, KeyError) as exc: - self.log("reseting analysis in place failed", True, exc) + self.logger.log_error(f"resetting analysis in place failed: {g_repr(exc)}") return False return True @@ -110,7 +110,9 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: ) analyze_sql += update_sql except KeyError as exc: - self.log("condition key reading failed", True, exc) + self.logger.log_error( + f"condition key reading failed: {g_repr(exc)}" + ) if self.debug_on: path = self.debug_dir.joinpath(f"analyze_serv_{str(run)}.sql") with path.open("w", encoding="utf8") as file: @@ -127,7 +129,9 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: JSONDecodeError, NotImplementedError, ) as exc: - self.log("analyzing scan_service in place failed", True, exc) + self.logger.log_error( + f"analyzing scan_service in place failed: {g_repr(exc)}" + ) return False return True @@ -137,7 +141,9 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: without using data frame direct in data base. """ if op_mode == OpMode.ISO: - self.log("ISO Standard analysis unavailable for scan_identifier.", True) + self.logger.log_warning( + "ISO Standard analysis unavailable for scan_identifier" + ) return False self.prepare_alwd_res() try: @@ -185,8 +191,10 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: analyze_sql += update_sql else: pass - except (KeyError) as exc: - self.log("condition key reading failed", True, exc) + except KeyError as exc: + self.logger.log_error( + f"condition key reading failed: {g_repr(exc)}" + ) drop_view_sql = f""" DROP VIEW IF EXISTS "{VwNm.sess_alwd}"; DROP VIEW IF EXISTS "{VwNm.sbfn_alwd}"; @@ -209,7 +217,9 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: AttributeError, JSONDecodeError, ) as exc: - self.log("analyzing scan_identifier in place failed", True, exc) + self.logger.log_error( + f"analyzing scan_identifier in place failed: {g_repr(exc)}" + ) return False return True @@ -225,8 +235,10 @@ def interpret( cond = "" try: failure = self.fail_name_dict[cond_dict[KyNm.fail]] - except (KeyError) as exc: - self.log("getting failure condition from JSON failed", True, exc) + except KeyError as exc: + self.logger.log_error( + f"getting failure condition from JSON failed: {g_repr(exc)}" + ) return 255, "" if KyNm.match in cond_dict.keys(): @@ -304,11 +316,9 @@ def get_fail_cond_match( f""" AND ({ref_cols}) {neg_str}IN """ + f"""(SELECT({ref_cols} ) FROM "{VwNm.ref_vw}")""" ) - except (KeyError) as exc: - self.log( - f"condition key reading failed at '{neg_str}{KyNm.match}'", - True, - exc, + except KeyError as exc: + self.logger.log_error( + f"condition key reading failed at '{neg_str}{KyNm.match}': {g_repr(exc)}" ) add_cond = "" return cond + " " + textwrap.dedent(add_cond) @@ -326,9 +336,9 @@ def get_fail_cond_resp(self, cond: str, cond_dict: dict, neg: bool = False) -> s else: add_cond += str(self.iso_err_name_dict[resp_name]) + "," add_cond = add_cond[:-1] + ")" - except (KeyError) as exc: - self.log( - f"condition key reading failed at '{neg_str}{KyNm.resd}'", True, exc + except KeyError as exc: + self.logger.log_error( + f"condition key 
reading failed at '{neg_str}{KyNm.resd}': {g_repr(exc)}" ) add_cond = "" return cond + " " + textwrap.dedent(add_cond) @@ -403,9 +413,9 @@ def get_fail_cond_supp( + f"""FROM "{TblNm.ref_resp}")""" ) cond += add_cond - except (KeyError) as exc: - self.log( - f"condition key reading failed at '{neg_str}{KyNm.supp}'", True, exc + except KeyError as exc: + self.logger.log_error( + f"condition key reading failed at '{neg_str}{KyNm.supp}': {g_repr(exc)}" ) return cond @@ -424,9 +434,9 @@ def get_fail_cond_for_serv( else: add_cond += str(self.iso_serv_name_dict[serv_name]) + "," add_cond = add_cond[:-1] + ")" - except (KeyError) as exc: - self.log( - f"condition key reading failed at '{neg_str}{KyNm.for_serv}'", True, exc + except KeyError as exc: + self.logger.log_error( + f"condition key reading failed at '{neg_str}{KyNm.for_serv}': {g_repr(exc)}" ) return cond return cond + " " + add_cond @@ -455,8 +465,8 @@ def get_fail_cond_known(self, cond: str, cond_dict: dict, neg: bool = False) -> for resp in self.iso_err_code_vec: add_cond += str(resp) + "," cond += add_cond[:-1] + ")" - except (KeyError) as exc: - self.log( - f"condition key reading failed at '{neg_str}{KyNm.known}'", True, exc + except KeyError as exc: + self.logger.log_error( + f"condition key reading failed at '{neg_str}{KyNm.known}': {g_repr(exc)}" ) return cond diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index ee90da5f5..49ffee626 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -18,6 +18,7 @@ from gallia.analyzer.name_config import ColNm, TblNm from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException from gallia.uds.core.constants import UDSIsoServices, UDSErrorCodes +from gallia.utils import g_repr class Categorizer(Analyzer): @@ -30,7 +31,6 @@ def __init__( self, path: str, artifacts_dir: Path, log_mode: LogMode = LogMode.STD_OUT ): Analyzer.__init__(self, path, artifacts_dir, log_mode) - self.msg_head = "[Categorizer] " def analyze_serv(self, run: int, op_mode: OpMode) -> bool: """ @@ -45,7 +45,7 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: if not self.write_db(raw_df, TblNm.serv): return False except (EmptyTableException, ColumnMismatchException, OperationalError) as exc: - self.log("analyzing scan_service failed", True, exc) + self.logger.log_error(f"analyzing scan_service failed: {g_repr(exc)}") return False return True @@ -65,7 +65,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: if not self.write_db(raw_df, TblNm.iden): return False except (EmptyTableException, ColumnMismatchException, OperationalError) as exc: - self.log("analyzing scan_identifier failed", True, exc) + self.logger.log_error(f"analyzing scan_identifier failed: {g_repr(exc)}") return False return True @@ -88,8 +88,10 @@ def categorize_serv( lambda x: self.get_fail_serv(op_mode, x[0], x[1], x[2], x[3]) ) raw_df = raw_df.drop([ColNm.combi], axis=1) - except (KeyError) as exc: - self.log("categorizing failures for scan_service failed", True, exc) + except KeyError as exc: + self.logger.log_error( + f"categorizing failures for scan_service failed: {g_repr(exc)}" + ) return pd.DataFrame() return raw_df @@ -105,7 +107,7 @@ def categorize_iden( try: serv_vec = np.unique(raw_df[ColNm.serv]) if not serv_vec.size == 1: - self.log("more than one service in a run", True) + self.logger.log_error("more than one service in a run") return pd.DataFrame() else: serv = serv_vec[0] @@ -127,8 +129,10 @@ def categorize_iden( ) ) raw_df = 
raw_df.drop([ColNm.combi], axis=1) - except (KeyError) as exc: - self.log("categorizing failures for scan_identifier failed", True, exc) + except KeyError as exc: + self.logger.log_error( + f"categorizing failures for scan_identifier failed: {g_repr(exc)}" + ) return pd.DataFrame() return raw_df @@ -233,7 +237,7 @@ def get_fail_serv( return Failure.OK_SERV_B if not cond_serv_supp: - # normal responses to unsupporeted services + # normal responses to unsupported services if cond_resp_means_not_supp: return Failure.OK_SERV_C @@ -252,7 +256,7 @@ def get_fail_serv( if cond_resp_means_not_supp: return Failure.OK_SERV_E - # Undocumented Type B: supported servcies in not available session responded + # Undocumented Type B: supported services in not available session responded # other than "not supported" family if not cond_resp_means_not_supp: return Failure.UNDOC_SERV_B @@ -262,7 +266,7 @@ def get_fail_serv( if cond_resp_alwd and not cond_resp_means_not_supp: return Failure.OK_SERV_F - # supported servcies (and even in available session) give a response undocumented in ISO + # supported services (and even in available session) give a response undocumented in ISO if not cond_resp_means_not_supp: return Failure.OK_SERV_G @@ -276,7 +280,7 @@ def get_fail_serv( if cond_resp_serv_not_supp: return Failure.MISS_SERV_B - # supported services in available session give a reponded as "subFunctionNotSupported" + # supported services in available session give a responded as "subFunctionNotSupported" if cond_resp_sbfn_not_supp: return Failure.OK_SERV_H @@ -328,7 +332,9 @@ def get_fail_iden( break except (KeyError, AttributeError) as exc: - self.log("getting failure for identifier failed", True, exc) + self.logger.log_error( + f"getting failure for identifier failed: {g_repr(exc)}" + ) return Failure.UNKNOWN if cond_combi: diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py index cf8577d8b..09f398176 100644 --- a/src/gallia/analyzer/db_handler.py +++ b/src/gallia/analyzer/db_handler.py @@ -6,13 +6,14 @@ gallia-analyze Database Handler module """ import os -import sys import sqlite3 from sqlite3 import OperationalError import pandas as pd from pandas.io.sql import DatabaseError from gallia.analyzer.mode_config import LogMode from gallia.analyzer.name_config import ColNm +from gallia.utils import g_repr +from gallia.penlog import Logger class DatabaseHandler: @@ -24,39 +25,11 @@ class DatabaseHandler: def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT) -> None: self.set_db_path(path) self.log_mode = log_mode - self.msg_head = "[DatabaseHandler] " - self.err_head = " " - self.log_file = "logfile.txt" self.con: sqlite3.Connection self.cur: sqlite3.Cursor + self.logger: Logger = Logger("Analyzer") self.connect_db() - def log(self, msg: str = "", err_flag: bool = False, exc: Exception = None) -> None: - """ - print program messages in console or log program messages in log file. 
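(Aside: the hand-rolled log() helper removed below formatted exceptions by concatenating type(exc).__name__ and str(exc); after this patch the call sites delegate to penlog and inline the exception text via g_repr instead. A rough, hypothetical stand-in for that formatting, assuming g_repr renders an exception roughly the way the removed helper did; the real helper is imported from gallia.utils:

    def g_repr(exc: BaseException) -> str:
        # hypothetical stand-in; gallia ships the real implementation
        return f"{type(exc).__name__}({exc})"

    try:
        {}["missing"]
    except KeyError as exc:
        print(f"condition key reading failed: {g_repr(exc)}")
)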
- """ - if err_flag: - if exc is None: - total_msg = self.msg_head + self.err_head + msg + "\n" - else: - total_msg = ( - self.msg_head - + self.err_head - + msg - + f": {type(exc).__name__} {str(exc)}" - + "\n" - ) - else: - total_msg = self.msg_head + msg + "\n" - if self.log_mode == LogMode.LOG_FILE: - try: - with open(self.log_file, "a", encoding="utf8") as logfile: - logfile.write(total_msg) - except FileNotFoundError: - sys.stdout.write(total_msg) - if self.log_mode == LogMode.STD_OUT: - sys.stdout.write(total_msg) - def set_db_path(self, path: str = "") -> bool: """ set path for database to read. @@ -75,8 +48,8 @@ def connect_db(self) -> bool: try: self.con = sqlite3.connect(self.db_path) self.cur = self.con.cursor() - except (OperationalError) as exc: - self.log("DB connection failed", True, exc) + except OperationalError as exc: + self.logger.log_error(f"DB connection failed: {g_repr(exc)}") return False return True @@ -101,7 +74,7 @@ def create_table( self.cur.executescript(create_sql) self.con.commit() except (OperationalError, AttributeError) as exc: - self.log("DB creating table failed", True, exc) + self.logger.log_error(f"DB creating table failed: {g_repr(exc)}") return False return True @@ -113,7 +86,7 @@ def clear_table(self, table_name: str) -> bool: self.cur.execute(f"DELETE FROM {table_name}") self.con.commit() except (OperationalError, AttributeError) as exc: - self.log("DB clearing table failed", True, exc) + self.logger.log_error(f"DB clearing table failed: {g_repr(exc)}") return False return True @@ -125,7 +98,7 @@ def delete_table(self, table_name: str) -> bool: self.cur.execute(f"DROP TABLE IF EXISTS {table_name}") self.con.commit() except (OperationalError, AttributeError) as exc: - self.log("DB deleting table failed", True, exc) + self.logger.log_error(f"DB deleting table failed: {g_repr(exc)}") return False return True @@ -137,11 +110,11 @@ def get_df_by_query(self, sql: str, error_on: bool = True) -> pd.DataFrame: raw_df: pd.DataFrame = pd.read_sql_query(sql, self.con) except (DatabaseError, AttributeError) as exc: if error_on: - self.log("DB query failed", True, exc) + self.logger.log_error(f"DB query failed: {g_repr(exc)}") return pd.DataFrame() if raw_df.shape[0] == 0: if error_on: - self.log("no entry in database.", True) + self.logger.log_warning("no entry in database.") return pd.DataFrame() return raw_df @@ -179,7 +152,7 @@ def delete_run_db(self, table_name: str, run: int) -> bool: self.cur.executescript(del_sql) self.con.commit() except (OperationalError, AttributeError) as exc: - self.log("deleting a run from DB failed", True, exc) + self.logger.log_error(f"deleting a run from DB failed: {g_repr(exc)}") return False return True @@ -190,6 +163,6 @@ def write_db(self, raw_df: pd.DataFrame, table_name: str) -> bool: try: raw_df.to_sql(table_name, self.con, if_exists="append", index=False) except (OperationalError, AttributeError) as exc: - self.log("writing data to DB failed", True, exc) + self.logger.log_error(f"writing data to DB failed: {g_repr(exc)}") return False return True diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index b3b9bf5fc..221f5848f 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -12,6 +12,7 @@ from gallia.analyzer.config import TblStruct from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import TblNm, ColNm, VwNm +from gallia.utils import g_repr class Extractor(Operator): @@ -23,7 +24,6 @@ class Extractor(Operator): def 
__init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): Operator.__init__(self, path, log_mode) - self.msg_head = "[Extractor] " def extract(self, runs_vec: np.ndarray) -> bool: """ @@ -41,7 +41,7 @@ def extract_each_run(self, run: int) -> bool: extract scan result data from JSON form in the database and save it into relational tables for a certain input run. """ - self.log(f"extracting run #{str(run)} from {self.db_path} ...") + self.logger.log_summary(f"extracting run #{str(run)} from {self.db_path} ...") self.check_boot(run) scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: @@ -99,8 +99,8 @@ def extract_serv(self, run: int) -> bool: try: self.cur.executescript(extract_sql) self.con.commit() - except (OperationalError) as exc: - self.log("extracting scan_service failed", True, exc) + except OperationalError as exc: + self.logger.log_error(f"extracting scan_service failed: {g_repr(exc)}") return False return True @@ -169,8 +169,8 @@ def extract_iden(self, run: int) -> bool: try: self.cur.executescript(extract_sql) self.con.commit() - except (OperationalError) as exc: - self.log("extracting scan_identifier failed", True, exc) + except OperationalError as exc: + self.logger.log_error(f"extracting scan_identifier failed: {g_repr(exc)}") return False return True @@ -188,8 +188,8 @@ def check_boot(self, run: int) -> bool: boot_df[ColNm.boot].apply(lambda x: x in boot_types_vec).all() ) if not boot_ok: - self.log("boot information not complete.", True) + self.logger.log_warning("boot information not complete") except (KeyError, AttributeError, OperationalError) as exc: - self.log("checking boot information failed", True, exc) + self.logger.log_error(f"checking boot information failed: {g_repr(exc)}") return False return boot_ok diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 6ffde8e0d..912432d9d 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -23,6 +23,7 @@ from gallia.analyzer.categorizer import Categorizer from gallia.analyzer.time_analyzer import TimeAnalyzer from gallia.analyzer.mode_config import LogMode + ANALYZER_AVAILABLE = True except ModuleNotFoundError: ANALYZER_AVAILABLE = False @@ -133,7 +134,7 @@ def main(self, args: Namespace) -> None: run_end = run_start + 1 if db_path == "": - print("Please set database path with --source option!") + self.logger.log_error("Please set database path with --source option!") sys.exit() start_time = time.process_time() @@ -195,10 +196,10 @@ def main(self, args: Namespace) -> None: reporter.iterate_all(show_possible_on) else: if service_id == -1: - print("Please input Service ID with --sid option.") + self.logger.log_error("Please input Service ID with --sid option.") else: reporter.consolidate_xl_iden(service_id, show_possible_on) - print( + self.logger.log_summary( f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}" ) diff --git a/src/gallia/analyzer/name_config.py b/src/gallia/analyzer/name_config.py index f9039dfca..354ea682d 100644 --- a/src/gallia/analyzer/name_config.py +++ b/src/gallia/analyzer/name_config.py @@ -57,7 +57,6 @@ class for colunm names in relational tables t_react = "reaction_time" prefix = "$_" infix = "_" - ecu_mode = "ecu_mode" is_err = "is_error" diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index a4c7754c9..4e882d7b1 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -22,17 +22,17 @@ from gallia.analyzer.exceptions import EmptyTableException, 
ColumnMismatchException from gallia.analyzer.constants import UDSIsoSessions from gallia.uds.core.constants import UDSErrorCodes, UDSIsoServices +from gallia.utils import g_repr class Operator(DatabaseHandler): """ - Class for common basic operations and utilites such as loading meta data of runs, + Class for common basic operations and utilities such as loading metadata of runs, loading reference dictionaries, getting other information from a certain run in the database. """ def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): DatabaseHandler.__init__(self, path, log_mode) - self.msg_head = "[Operator] " self.num_modes = 0 self.run_meta_df = pd.DataFrame() self.lu_iden_df = pd.DataFrame() @@ -78,7 +78,7 @@ def get_scan_mode(self, run: int) -> ScanMode: if scan_mode_str == "scan-identifiers": return ScanMode.IDEN except (KeyError, IndexingError, AttributeError) as exc: - self.log("getting scan mode failed", True, exc) + self.logger.log_error(f"getting scan mode failed: {g_repr(exc)}") return ScanMode.UNKNOWN return ScanMode.UNKNOWN @@ -88,16 +88,18 @@ def get_sid(self, run: int) -> int: """ try: if self.get_scan_mode(run) != ScanMode.IDEN: - self.log("scan mode is not scan_identifier.", True) + self.logger.log_error("scan mode is not scan_identifier") return -1 raw_df = self.read_run_db(TblNm.iden, run) self.check_df(raw_df, TblStruct.iden) serv_vec = np.unique(raw_df[ColNm.serv]) if serv_vec.shape[0] > 1: - self.log("A run has more than one Service ID.", True) + self.logger.log_warning("A run has more than one Service ID") serv_ser = raw_df[ColNm.serv].mode(dropna=True) if serv_ser.shape[0] > 1: - self.log("A run has more than one most frequent Service ID.", True) + self.logger.log_warning( + "A run has more than one most frequent Service ID" + ) except ( KeyError, IndexingError, @@ -105,7 +107,7 @@ def get_sid(self, run: int) -> int: EmptyTableException, ColumnMismatchException, ) as exc: - self.log("getting Service ID failed", True, exc) + self.logger.log_error(f"getting Service ID failed: {g_repr(exc)}") return -1 return serv_ser[0] @@ -125,7 +127,7 @@ def get_ecu_mode(self, run: int) -> int: ecu_mode = 0 return ecu_mode except (KeyError, IndexingError, AttributeError) as exc: - self.log("getting ECU mode failed", True, exc) + self.logger.log_error(f"getting ECU mode failed: {g_repr(exc)}") return -1 def get_op_mode(self, iso_on: bool) -> OpMode: @@ -153,7 +155,9 @@ def get_sess_lu(self) -> np.ndarray: EmptyTableException, ColumnMismatchException, ) as exc: - self.log("getting sessions in lookup table failed", True, exc) + self.logger.log_error( + f"getting sessions in lookup table failed: {g_repr(exc)}" + ) return np.array([]) return sess_vec @@ -179,7 +183,9 @@ def get_ref_df_from_json(self, path: str) -> pd.DataFrame: FileNotFoundError, JSONDecodeError, ) as exc: - self.log("getting reference summary from JSON failed", True, exc) + self.logger.log_error( + f"getting reference summary from JSON failed: {g_repr(exc)}" + ) return pd.DataFrame() return ref_df @@ -202,7 +208,9 @@ def get_dft_err_df_from_raw(self, raw_df: pd.DataFrame) -> pd.DataFrame: EmptyTableException, ColumnMismatchException, ) as exc: - self.log("getting default error data frame failed", True, exc) + self.logger.log_error( + f"getting default error data frame failed: {g_repr(exc)}" + ) return pd.DataFrame() return dft_err_df @@ -218,7 +226,7 @@ def get_pos_res(self, search_id: int) -> str: res_df = self.get_df_by_query(res_sql) resp = cast(str, res_df.iloc[0, 0]) except (KeyError, IndexingError, 
AttributeError) as exc: - self.log("getting positive response failed", True, exc) + self.logger.log_error(f"getting positive response failed: {g_repr(exc)}") return "" return resp @@ -253,12 +261,14 @@ def load_meta(self, force: bool = False) -> bool: self.cur.executescript(gen_meta_sql) meta_df = self.read_db(TblNm.meta) if meta_df.shape == (0, 0): - self.log("no meta data", True) + self.logger.log_error("no meta data") return False meta_df.set_index("run_id", inplace=True) self.run_meta_df = meta_df except (KeyError, IndexingError, AttributeError, OperationalError) as exc: - self.log("loading run meta data failed", True, exc) + self.logger.log_error( + f"loading run meta data failed: {g_repr(exc)}", + ) return False return True @@ -324,7 +334,9 @@ def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bo EmptyTableException, ColumnMismatchException, ) as exc: - self.log("loading vendor-specific reference failed", True, exc) + self.logger.log_error( + f"loading vendor-specific reference failed: {g_repr(exc)}" + ) return False return True @@ -341,7 +353,9 @@ def load_ref_iso(self, force: bool = False) -> bool: self.supp_serv_iso_vec = np.sort(np.array(ref_iso_df.index)) self.ref_iso_df: pd.DataFrame = ref_iso_df.sort_index() except (KeyError, IndexingError, AttributeError) as exc: - self.log("loading reference summary for UDS ISO failed", True, exc) + self.logger.log_error( + f"loading reference summary for UDS ISO failed: {g_repr(exc)}" + ) return False return True @@ -388,7 +402,9 @@ def load_ven_sess(self) -> bool: ) as exc: self.sess_name_dict = self.iso_sess_name_dict self.sess_code_dict = self.iso_sess_code_dict - self.log("loading vendor-specific sessions failed", True, exc) + self.logger.log_error( + f"loading vendor-specific sessions failed: {g_repr(exc)}" + ) return False return True @@ -426,7 +442,9 @@ def load_lu_iden(self, serv: int, ecu_mode: int) -> bool: OperationalError, ) as exc: self.lu_iden_df = pd.DataFrame() - self.log(f"loading lookup for service 0x{serv:02x} failed", True, exc) + self.logger.log_error( + f"loading lookup for service 0x{serv:02x} failed: {g_repr(exc)}" + ) return False return True @@ -435,10 +453,10 @@ def prepare_table(self) -> bool: prepare relational tables to save data for scan_service and scan_identifier. """ if not self.create_table(TblNm.serv, TblStruct.serv): - self.log("preparing table for scan_service failed.", True) + self.logger.log_error("preparing table for scan_service failed") return False if not self.create_table(TblNm.iden, TblStruct.iden): - self.log("preparing table for scan_identifier failed.", True) + self.logger.log_error("preparing table for scan_identifier failed") return False return True @@ -511,7 +529,9 @@ def prepare_alwd_sess_boot( EmptyTableException, ColumnMismatchException, ) as exc: - self.log("preparing table for session and boot failed", True, exc) + self.logger.log_error( + f"preparing table for session and boot failed: {g_repr(exc)}" + ) return False return True @@ -536,7 +556,9 @@ def prepare_alwd( pair_df = pd.DataFrame(pair_ls, columns=[ColNm.serv, col_name]) self.write_db(pair_df, table_name) except (KeyError, IndexError, AttributeError) as exc: - self.log("preparing table for availabilities failed", True, exc) + self.logger.log_error( + f"preparing table for availabilities failed: {g_repr(exc)}" + ) return False return True @@ -558,7 +580,7 @@ def clear(self) -> bool: def clear_alwd(self) -> bool: """ - clear relational tables for reference in the databse. 
+ clear relational tables for reference in the database. """ table_ls = [ TblNm.ref_resp, diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index 8b086993b..a9f93df30 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -18,6 +18,7 @@ from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import ColNm, TblNm from gallia.analyzer.exceptions import ColumnMismatchException, EmptyTableException +from gallia.utils import g_repr class Reporter(Operator): @@ -29,7 +30,6 @@ def __init__( self, path: str, artifacts_dir: Path, log_mode: LogMode = LogMode.STD_OUT ): Operator.__init__(self, path, log_mode) - self.msg_head = "[Reporter] " self.artifacts_dir = artifacts_dir self.abn_serv_vec = np.array([]) self.abn_iden_vec = np.array([]) @@ -52,7 +52,9 @@ def consolidate_xl_serv(self, show_psb: bool = False) -> bool: return False self.load_ven_sess() self.load_ven_lu() - self.log(f"consolidating scan_service by ECU mode from {self.db_path} ...") + self.logger.log_summary( + f"consolidating scan_service by ECU mode from {self.db_path} ..." + ) xl_generator = ExcelGenerator(self.db_path, self.log_mode) xl_is_empty = True for ecu_mode in np.arange(self.num_modes): @@ -73,10 +75,12 @@ def consolidate_xl_serv(self, show_psb: bool = False) -> bool: ColumnMismatchException, AttributeError, ) as exc: - self.log("consolidating scan_service failed", True, exc) + self.logger.log_error( + f"consolidating scan_service failed: {g_repr(exc)}" + ) continue except EmptyTableException: - self.log(f"nothing to report for ECU mode {ecu_mode}.") + self.logger.log_warning(f"nothing to report for ECU mode {ecu_mode}.") if xl_is_empty: return False out_path = self.get_path( @@ -92,20 +96,20 @@ def consolidate_xl_iden(self, serv: int, show_psb: bool = False) -> bool: for a certain given service into one EXCEL file. """ if serv not in self.iso_serv_by_iden_vec: - self.log("given Service ID is not service by identifier.") + self.logger.log_error("given Service ID is not service by identifier.") return False if not self.load_meta(force=True): return False self.load_ven_sess() self.load_ven_lu() - self.log( + self.logger.log_summary( f"consolidating for Service ID 0x{serv:02X} {self.iso_serv_code_dict[serv]} from {self.db_path} ..." ) xl_generator = ExcelGenerator(self.db_path, self.log_mode) xl_is_empty = True if self.num_modes == 0: num_modes = NUM_ECU_MODES - self.log( + self.logger.log_warning( f"no information about ECU modes. trying {NUM_ECU_MODES} mode(s)..." ) else: @@ -128,12 +132,14 @@ def consolidate_xl_iden(self, serv: int, show_psb: bool = False) -> bool: ColumnMismatchException, AttributeError, ) as exc: - self.log("consolidating scan_identifier failed", True, exc) + self.logger.log_error( + f"consolidating scan_identifier failed: {g_repr(exc)}" + ) continue except EmptyTableException: - self.log(f"nothing to report for ECU mode {ecu_mode}.") + self.logger.log_error(f"nothing to report for ECU mode {ecu_mode}.") if xl_is_empty: - self.log(f"nothing to report for Service ID 0x{serv:02X}") + self.logger.log_info(f"nothing to report for Service ID 0x{serv:02X}") return False out_path = self.get_path( f"0x{serv:02X}_{self.iso_serv_code_dict[serv]}", @@ -164,7 +170,7 @@ def report_xl_each_run(self, run: int, show_psb: bool = False) -> bool: """ generate EXCEL report for a certain run. 
""" - self.log(f"reporting run #{str(run)} from {self.db_path} ...") + self.logger.log_summary(f"reporting run #{str(run)} from {self.db_path} ...") scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: return self.report_xl_serv(run, show_psb) @@ -190,7 +196,7 @@ def report_xl_serv(self, run: int, show_psb: bool = False) -> bool: if not xl_generator.save_close_xl(out_path): return False except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: - self.log("reporting scan_service failed", True, exc) + self.logger.log_error(f"reporting scan_service failed: {g_repr(exc)}") return False return True @@ -212,7 +218,7 @@ def report_xl_iden(self, run: int, show_psb: bool = False) -> bool: if not xl_generator.save_close_xl(out_path): return False except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: - self.log("reporting scan_identifier failed", True, exc) + self.logger.log_error(f"reporting scan_identifier failed: {g_repr(exc)}") return False return True @@ -226,12 +232,12 @@ def get_path( out_path = self.artifacts_dir.joinpath(f"{suffix}{ext}") if out_path.is_file() and rm_if_exists: os.remove(out_path) - self.log(f"existing file removed from {out_path}") + self.logger.log_info(f"existing file removed from {out_path}") return str(out_path) def get_entries_oi(self, scan_mode: ScanMode, show_psb: bool = False) -> np.ndarray: """ - get services or identifieres of interest to display in summary sheet. + get services or identifiers of interest to display in summary sheet. """ if show_psb: if scan_mode == ScanMode.SERV: @@ -247,7 +253,7 @@ def get_entries_oi(self, scan_mode: ScanMode, show_psb: bool = False) -> np.ndar def load_sid_oi(self, run: int, ecu_mode: int = -1) -> bool: """ - load services of interest for a given input run. + load services of interest in a given input run. """ try: raw_df = self.read_run_db(TblNm.serv, run) @@ -255,7 +261,7 @@ def load_sid_oi(self, run: int, ecu_mode: int = -1) -> bool: if not self.load_sid_oi_from_df(raw_df, ecu_mode): return False except (EmptyTableException, ColumnMismatchException) as exc: - self.log("loading services of interest failed", True, exc) + self.logger.log_error(f"loading services of interest failed: {g_repr(exc)}") return False return True @@ -283,13 +289,15 @@ def load_sid_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool: cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode self.abn_serv_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.serv])) except (KeyError, IndexingError, AttributeError) as exc: - self.log("loading services of interest from data frame failed", True, exc) + self.logger.log_error( + f"loading services of interest from data frame failed: {g_repr(exc)}" + ) return False return True def load_iden_oi(self, run: int, ecu_mode: int = -1) -> bool: """ - load identifiers of interest for a given input run. + load identifiers of interest in a given input run. 
""" try: raw_df = self.read_run_db(TblNm.iden, run) @@ -297,7 +305,9 @@ def load_iden_oi(self, run: int, ecu_mode: int = -1) -> bool: if not self.load_iden_oi_from_df(raw_df, ecu_mode): return False except (EmptyTableException, ColumnMismatchException) as exc: - self.log("loading identifiers of interest failed", True, exc) + self.logger.log_error( + f"loading identifiers of interest failed: {g_repr(exc)}" + ) return False return True @@ -308,7 +318,7 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool try: serv_vec = np.sort(np.unique(raw_df[ColNm.serv])) if not serv_vec.size == 1: - self.log("more than one service in a run", True) + self.logger.log_error("more than one service in a run") return False dft_err_df = self.get_dft_err_df_from_raw(raw_df) dft_err_ser: pd.Series = dft_err_df.loc[ColNm.dft] @@ -328,8 +338,8 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode self.abn_iden_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.iden])) except (KeyError, IndexingError, AttributeError) as exc: - self.log( - "loading identifiers of interest from data frame failed", True, exc + self.logger.log_error( + f"loading identifiers of interest from data frame failed: {g_repr(exc)}" ) return False return True diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py index 94e2061a2..d5a3c8462 100644 --- a/src/gallia/analyzer/time_analyzer.py +++ b/src/gallia/analyzer/time_analyzer.py @@ -17,6 +17,7 @@ from gallia.analyzer.config import PltDesign, TblStruct, SrcPath, DFT_T_PREC from gallia.analyzer.mode_config import ScanMode, LogMode from gallia.analyzer.name_config import ColNm, TblNm, KyNm +from gallia.utils import g_repr class TimeAnalyzer(Reporter): @@ -32,7 +33,6 @@ def __init__( log_mode: LogMode = LogMode.STD_OUT, ): Reporter.__init__(self, path, artifacts_dir, log_mode) - self.msg_head = "[TimeAnalyzer] " self.t_prec = t_prec self.jpg_ext = ".jpg" self.csv_ext = ".csv" @@ -49,7 +49,9 @@ def extract_tra_each_run(self, run: int) -> bool: """ extract reaction times of each run. """ - self.log(f"extracting time for run #{str(run)} from {self.db_path} ...") + self.logger.log_summary( + f"extracting time for run #{str(run)} from {self.db_path} ..." + ) scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: tbl_nm = TblNm.serv @@ -89,7 +91,9 @@ def extract_tra_each_run(self, run: int) -> bool: EmptyTableException, ColumnMismatchException, ) as exc: - self.log(f"extracting reaction time for run #{run} failed", True, exc) + self.logger.log_error( + f"extracting reaction time for run #{run} failed: {g_repr(exc)}" + ) return False return True @@ -105,7 +109,9 @@ def plot_tra_each_run(self, run: int) -> bool: """ plot reaction time for each run. """ - self.log(f"plotting reaction time for run #{str(run)} from {self.db_path} ...") + self.logger.log_summary( + f"plotting reaction time for run #{str(run)} from {self.db_path} ..." 
+ ) scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: self.plot_tra_serv(run) @@ -146,8 +152,8 @@ def plot_tra_serv(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.log( - f"plotting service ID and reaction time in run #{run} failed", True, exc + self.logger.log_error( + f"plotting service ID and reaction time in run #{run} failed: {g_repr(exc)}" ) return False return True @@ -185,8 +191,8 @@ def plot_tra_iden(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.log( - f"plotting identifier and reaction time in run #{run} failed", True, exc + self.logger.log_error( + f"plotting identifier and reaction time in run #{run} failed: {g_repr(exc)}" ) return False return True @@ -203,7 +209,9 @@ def hist_tra_each_run(self, run: int) -> bool: """ create a histogram of reaction time for a given run. """ - self.log(f"creating a histogram for run #{str(run)} from {self.db_path} ...") + self.logger.log_summary( + f"creating a histogram for run #{str(run)} from {self.db_path} ..." + ) try: raw_df = pd.read_csv(self.get_path(f"time_run{run:02}", self.csv_ext)) plt.style.use(PltDesign.hist_style) @@ -219,8 +227,8 @@ def hist_tra_each_run(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.log( - f"establishing histogram of identifiers in run #{run} failed", True, exc + self.logger.log_error( + f"establishing histogram of identifiers in run #{run} failed: {g_repr(exc)}" ) return False return True diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 6967559fd..63afe9f54 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -26,6 +26,7 @@ from gallia.analyzer.failure import Failure from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import ColNm, ShtNm, CellCnt, KyNm +from gallia.utils import g_repr class ExcelGenerator(Operator): @@ -35,7 +36,6 @@ class ExcelGenerator(Operator): def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT): Operator.__init__(self, path, log_mode) - self.msg_head = "[ExcelGenerator] " self.workbook: op.Workbook = op.Workbook() self.worksheet: Any self.load_color_code(SrcPath.err_src) @@ -70,7 +70,7 @@ def write_xl( if len(self.workbook.worksheets) == 0: self.workbook.create_sheet(ShtNm.init) except (SheetTitleException, ReadOnlyWorkbookException) as exc: - self.log("generating EXCEL failed", True, exc) + self.logger.log_error(f"generating EXCEL failed: {g_repr(exc)}") return False return True @@ -79,7 +79,7 @@ def save_close_xl(self, out_path: str) -> bool: self.workbook.save(out_path) self.workbook.close() except (InvalidFileException, WorkbookAlreadySaved) as exc: - self.log("saving EXCEL failed", True, exc) + self.logger.log_error(f"saving EXCEL failed: {g_repr(exc)}") return False return True @@ -113,7 +113,7 @@ def add_sum_sheet_serv( ScanMode.SERV, ) except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: - self.log("adding summary sheet failed", True, exc) + self.logger.log_error(f"adding summary sheet failed: {g_repr(exc)}") return False return True @@ -149,7 +149,7 @@ def add_sum_sheet_iden( ScanMode.IDEN, ) except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: - self.log("adding summary sheet failed", True, exc) + self.logger.log_error(f"adding 
summary sheet failed: {g_repr(exc)}") return False return True @@ -188,7 +188,9 @@ def sum_sheet_fill_origin( self.start_row + 1, self.start_col + 1 ).coordinate except (KeyError, AttributeError) as exc: - self.log("filling origin cell of summary sheet failed", True, exc) + self.logger.log_error( + f"filling origin cell of summary sheet failed: {g_repr(exc)}" + ) return self.start_row, self.start_col return cur_row, cur_col @@ -230,7 +232,7 @@ def sum_sheet_fill_index( self.worksheet.cell(cur_row, self.start_col).value = index_name cur_row += 1 if row.subfunc != -1: - # service has subfunction and identifer + # service has subfunction and identifier self.worksheet.cell( cur_row, self.start_col + 1 ).value = row.subfunc @@ -240,7 +242,9 @@ def sum_sheet_fill_index( name=XlDesign.font_index ) except (KeyError, AttributeError) as exc: - self.log("filling index of summary sheet failed", True, exc) + self.logger.log_error( + f"filling index of summary sheet failed: {g_repr(exc)}" + ) return self.start_row, self.start_col + 1 cur_col += int(has_id) + int(has_sub_func) @@ -281,7 +285,9 @@ def sum_sheet_fill_sess( cur_col -= sess_num cur_row = self.start_row + 2 except (KeyError, IndexingError, AttributeError) as exc: - self.log("filling top session row of summary sheet failed", True, exc) + self.logger.log_error( + f"filling top session row of summary sheet failed: {g_repr(exc)}" + ) return self.start_row + 1, self.start_col + 1 return cur_row, cur_col @@ -338,7 +344,9 @@ def sum_sheet_fill_resp( cur_col += 1 cur_row = self.start_row + 2 except (KeyError, IndexingError, AttributeError) as exc: - self.log("filling response field of summary sheet failed", True, exc) + self.logger.log_error( + f"filling response field of summary sheet failed: {g_repr(exc)}" + ) return self.start_row + 1, self.start_col + 1 return cur_row, cur_col @@ -349,7 +357,7 @@ def add_failure_sheet( add failure(undocumented or missing) sheet to report EXCEL file. 
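(Aside: the ExcelGenerator hunks above are again logging-only, but for orientation: the class drives openpyxl directly, writing cells one by one, coloring failure cells with PatternFill, and sizing rows and columns by hand. A minimal openpyxl sketch of those operations; the cell values and dimensions are examples, not the actual report layout:

    import openpyxl as op
    from openpyxl.styles import PatternFill
    from openpyxl.utils import get_column_letter

    wb = op.Workbook()
    ws = wb.active
    ws.cell(1, 1).value = "0x22 ReadDataByIdentifier"   # row, column are 1-indexed
    ws.cell(1, 1).fill = PatternFill(
        start_color="00FF0000", end_color="00FF0000", fill_type="solid"
    )
    ws.column_dimensions[get_column_letter(1)].width = 30
    wb.save("report.xlsx")
)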
""" if scan_mode == ScanMode.UNKNOWN: - self.log("adding summary sheet failed: scan mode unknown.") + self.logger.log_error("adding summary sheet failed: scan mode unknown.") return False try: dft_err_df = self.get_dft_err_df_from_raw(raw_df) @@ -452,7 +460,9 @@ def add_failure_sheet( cur_row = self.start_row cur_col = self.start_col except (KeyError, IndexingError, AttributeError, SheetTitleException) as exc: - self.log("adding failure summary sheets failed", True, exc) + self.logger.log_error( + f"adding failure summary sheets failed: {g_repr(exc)}" + ) return False return True @@ -468,7 +478,7 @@ def load_color_code(self, path: str) -> bool: for color_code in color_code_ls } except (FileNotFoundError, KeyError, JSONDecodeError) as exc: - self.log("loading color codes failed", True, exc) + self.logger.log_error(f"loading color codes failed: {g_repr(exc)}") return False return True @@ -479,7 +489,7 @@ def set_cell_width(self, col: int, width: int) -> bool: try: self.worksheet.column_dimensions[get_column_letter(col)].width = width except (KeyError, AttributeError) as exc: - self.log("setting cell width failed", True, exc) + self.logger.log_error(f"setting cell width failed: {g_repr(exc)}") return False return True @@ -490,7 +500,7 @@ def set_cell_height(self, row: int, height: int) -> bool: try: self.worksheet.row_dimensions[row].height = height except (KeyError, AttributeError) as exc: - self.log("setting cell height failed", True, exc) + self.logger.log_error(f"setting cell height failed: {g_repr(exc)}") return False return True @@ -505,7 +515,7 @@ def fill_cell(self, row: int, col: int, error: int) -> bool: fill_type="solid", ) except (KeyError, AttributeError) as exc: - self.log("filling cell failed", True, exc) + self.logger.log_error(f"filling cell failed: {g_repr(exc)}") return False return True @@ -517,7 +527,7 @@ def check_fail(self, fail: int, fail_class: Failure) -> bool: def get_code_text(self, code: int, ref: Dict[int, str]) -> str: """ - get combined string of hex code and correspoding name + get combined string of hex code and corresponding name with a given code and a given dictionary. """ try: @@ -532,7 +542,7 @@ def get_code_text(self, code: int, ref: Dict[int, str]) -> str: def get_err_rgb(self, error: int) -> str: """ - get RGB color code string for an error reponse. + get RGB color code string for an error response. 
""" try: return "00" + self.color_code_dict[error] From 6c932b992bb1ab80e7b70ed5363fb3efb58b06e9 Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Wed, 24 Jul 2024 12:58:28 +0200 Subject: [PATCH 17/26] Adapt to new gallia version --- poetry.lock | 42 +++++++++++++++++++++++++- pyproject.toml | 3 +- src/gallia/analyzer/analyzer.py | 28 +++++++++--------- src/gallia/analyzer/categorizer.py | 16 +++++----- src/gallia/analyzer/db_handler.py | 26 ++++++++-------- src/gallia/analyzer/extractor.py | 12 ++++---- src/gallia/analyzer/main.py | 30 ++++++++++++------- src/gallia/analyzer/operator.py | 44 ++++++++++++++-------------- src/gallia/analyzer/reporter.py | 38 ++++++++++++------------ src/gallia/analyzer/time_analyzer.py | 16 +++++----- src/gallia/analyzer/xl_generator.py | 32 ++++++++++---------- src/gallia/cli.py | 6 ++++ src/gallia/commands/__init__.py | 3 ++ 13 files changed, 178 insertions(+), 118 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8a9399c3f..5823e21cf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1391,6 +1391,24 @@ pytz = ">=2020.1" [package.extras] test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] +[[package]] +name = "pandas-stubs" +version = "2.2.2.240603" +description = "Type annotations for pandas" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas_stubs-2.2.2.240603-py3-none-any.whl", hash = "sha256:e08ce7f602a4da2bff5a67475ba881c39f2a4d4f7fccc1cba57c6f35a379c6c0"}, + {file = "pandas_stubs-2.2.2.240603.tar.gz", hash = "sha256:2dcc86e8fa6ea41535a4561c1f08b3942ba5267b464eff2e99caeee66f9e4cd1"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.23.5", markers = "python_version >= \"3.9\" and python_version < \"3.12\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\" and python_version < \"3.13\""}, +] +types-pytz = ">=2022.1.1" + [[package]] name = "parso" version = "0.8.4" @@ -2411,6 +2429,17 @@ files = [ {file = "types_aiofiles-24.1.0.20240626-py3-none-any.whl", hash = "sha256:7939eca4a8b4f9c6491b6e8ef160caee9a21d32e18534a57d5ed90aee47c66b4"}, ] +[[package]] +name = "types-openpyxl" +version = "3.1.5.20240719" +description = "Typing stubs for openpyxl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-openpyxl-3.1.5.20240719.tar.gz", hash = "sha256:46c7167e0a2998b7d76d9d9f25eb7bca3f94409dbeddabdc5c9ff93c1e49f503"}, + {file = "types_openpyxl-3.1.5.20240719-py3-none-any.whl", hash = "sha256:b629c8c4f22bf9395eac59698ae320159d5090080d757d0950852dacae93fb0b"}, +] + [[package]] name = "types-psutil" version = "6.0.0.20240621" @@ -2422,6 +2451,17 @@ files = [ {file = "types_psutil-6.0.0.20240621-py3-none-any.whl", hash = "sha256:b02f05d2c4141cd5926d82d8b56e4292a4d8f483d8a3400b73edf153834a3c64"}, ] +[[package]] +name = "types-pytz" +version = "2024.1.0.20240417" +description = "Typing stubs for pytz" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = "sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, + {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, +] + [[package]] name = "types-tabulate" version = "0.9.0.20240106" @@ -2742,4 +2782,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.13" -content-hash = "ee12212dfe616d2d660c8b6fe01a946422e9164388e351005ee53623252ab1e4" +content-hash = "f66f146f73a9a0fa574e72b02ff66f79e17c9b04adf64ef605c1bf9e46c55969" diff 
--git a/pyproject.toml b/pyproject.toml index 3bdee71c9..31e44d77d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,8 @@ numpy = "^1.21.4" openpyxl = "^3.0.9" pandas = "^1.3.4" matplotlib = "^3.4.3" +pandas-stubs = "^2.2.2.240603" +types-openpyxl = "^3.1.5.20240719" [tool.poetry.group.dev.dependencies] Sphinx = ">=5.2,<8.0" @@ -80,7 +82,6 @@ openpyxl-stubs = "^0.1.21" "netzteil" = "opennetzteil.cli:main" "cursed-hr" = "cursed_hr.cursed_hr:main" "hr" = "hr:main" -"analyze" = "gallia.analyzer.main:AnalyzerMain" [tool.mypy] strict = true diff --git a/src/gallia/analyzer/analyzer.py b/src/gallia/analyzer/analyzer.py index 9f61dd740..72eb5ad2d 100644 --- a/src/gallia/analyzer/analyzer.py +++ b/src/gallia/analyzer/analyzer.py @@ -17,7 +17,7 @@ from gallia.analyzer.config import SrcPath from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode from gallia.analyzer.name_config import ColNm, KyNm, TblNm, VwNm, NEG_STR -from gallia.utils import g_repr +from gallia.services.uds.core.utils import g_repr class Analyzer(Operator): @@ -60,7 +60,7 @@ def analyze_each_run(self, run: int, op_mode: OpMode) -> bool: """ analyze certain run at a given operation mode. """ - self.logger.log_summary(f"analyzing run #{str(run)} from {self.db_path} ...") + self.logger.result(f"analyzing run #{str(run)} from {self.db_path} ...") scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: if not self.reset(TblNm.serv, run): @@ -83,7 +83,7 @@ def reset(self, table_name: str, run: int) -> bool: self.cur.executescript(reset_sql) self.con.commit() except (OperationalError, FileNotFoundError, KeyError) as exc: - self.logger.log_error(f"resetting analysis in place failed: {g_repr(exc)}") + self.logger.error(f"resetting analysis in place failed: {g_repr(exc)}") return False return True @@ -110,7 +110,7 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: ) analyze_sql += update_sql except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed: {g_repr(exc)}" ) if self.debug_on: @@ -129,7 +129,7 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: JSONDecodeError, NotImplementedError, ) as exc: - self.logger.log_error( + self.logger.error( f"analyzing scan_service in place failed: {g_repr(exc)}" ) return False @@ -141,7 +141,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: without using data frame direct in data base. 
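(Aside: this patch's mechanical rename (log_error to error, log_warning to warning, log_summary to result) tracks the new gallia logger returned by get_logger, whose surface mirrors the standard library apart from gallia-specific levels such as result. As a rough stdlib analogy only, with gallia's own logger assumed rather than shown:

    import logging

    logger = logging.getLogger(__file__)
    logger.error("DB connection failed: OperationalError('no such table')")
    logger.warning("no entry in database.")
)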
""" if op_mode == OpMode.ISO: - self.logger.log_warning( + self.logger.warning( "ISO Standard analysis unavailable for scan_identifier" ) return False @@ -192,7 +192,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: else: pass except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed: {g_repr(exc)}" ) drop_view_sql = f""" @@ -217,7 +217,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: AttributeError, JSONDecodeError, ) as exc: - self.logger.log_error( + self.logger.error( f"analyzing scan_identifier in place failed: {g_repr(exc)}" ) return False @@ -236,7 +236,7 @@ def interpret( try: failure = self.fail_name_dict[cond_dict[KyNm.fail]] except KeyError as exc: - self.logger.log_error( + self.logger.error( f"getting failure condition from JSON failed: {g_repr(exc)}" ) return 255, "" @@ -317,7 +317,7 @@ def get_fail_cond_match( + f"""(SELECT({ref_cols} ) FROM "{VwNm.ref_vw}")""" ) except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed at '{neg_str}{KyNm.match}': {g_repr(exc)}" ) add_cond = "" @@ -337,7 +337,7 @@ def get_fail_cond_resp(self, cond: str, cond_dict: dict, neg: bool = False) -> s add_cond += str(self.iso_err_name_dict[resp_name]) + "," add_cond = add_cond[:-1] + ")" except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed at '{neg_str}{KyNm.resd}': {g_repr(exc)}" ) add_cond = "" @@ -414,7 +414,7 @@ def get_fail_cond_supp( ) cond += add_cond except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed at '{neg_str}{KyNm.supp}': {g_repr(exc)}" ) return cond @@ -435,7 +435,7 @@ def get_fail_cond_for_serv( add_cond += str(self.iso_serv_name_dict[serv_name]) + "," add_cond = add_cond[:-1] + ")" except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed at '{neg_str}{KyNm.for_serv}': {g_repr(exc)}" ) return cond @@ -466,7 +466,7 @@ def get_fail_cond_known(self, cond: str, cond_dict: dict, neg: bool = False) -> add_cond += str(resp) + "," cond += add_cond[:-1] + ")" except KeyError as exc: - self.logger.log_error( + self.logger.error( f"condition key reading failed at '{neg_str}{KyNm.known}': {g_repr(exc)}" ) return cond diff --git a/src/gallia/analyzer/categorizer.py b/src/gallia/analyzer/categorizer.py index 49ffee626..197115da5 100644 --- a/src/gallia/analyzer/categorizer.py +++ b/src/gallia/analyzer/categorizer.py @@ -17,8 +17,8 @@ from gallia.analyzer.mode_config import LogMode, OpMode from gallia.analyzer.name_config import ColNm, TblNm from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException -from gallia.uds.core.constants import UDSIsoServices, UDSErrorCodes -from gallia.utils import g_repr +from gallia.services.uds.core.constants import UDSIsoServices, UDSErrorCodes +from gallia.services.uds.core.utils import g_repr class Categorizer(Analyzer): @@ -45,7 +45,7 @@ def analyze_serv(self, run: int, op_mode: OpMode) -> bool: if not self.write_db(raw_df, TblNm.serv): return False except (EmptyTableException, ColumnMismatchException, OperationalError) as exc: - self.logger.log_error(f"analyzing scan_service failed: {g_repr(exc)}") + self.logger.error(f"analyzing scan_service failed: {g_repr(exc)}") return False return True @@ -65,7 +65,7 @@ def analyze_iden(self, run: int, op_mode: OpMode) -> bool: if not self.write_db(raw_df, TblNm.iden): return False except (EmptyTableException, ColumnMismatchException, 
OperationalError) as exc: - self.logger.log_error(f"analyzing scan_identifier failed: {g_repr(exc)}") + self.logger.error(f"analyzing scan_identifier failed: {g_repr(exc)}") return False return True @@ -89,7 +89,7 @@ def categorize_serv( ) raw_df = raw_df.drop([ColNm.combi], axis=1) except KeyError as exc: - self.logger.log_error( + self.logger.error( f"categorizing failures for scan_service failed: {g_repr(exc)}" ) return pd.DataFrame() @@ -107,7 +107,7 @@ def categorize_iden( try: serv_vec = np.unique(raw_df[ColNm.serv]) if not serv_vec.size == 1: - self.logger.log_error("more than one service in a run") + self.logger.error("more than one service in a run") return pd.DataFrame() else: serv = serv_vec[0] @@ -130,7 +130,7 @@ def categorize_iden( ) raw_df = raw_df.drop([ColNm.combi], axis=1) except KeyError as exc: - self.logger.log_error( + self.logger.error( f"categorizing failures for scan_identifier failed: {g_repr(exc)}" ) return pd.DataFrame() @@ -332,7 +332,7 @@ def get_fail_iden( break except (KeyError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"getting failure for identifier failed: {g_repr(exc)}" ) return Failure.UNKNOWN diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py index 09f398176..087fe91c9 100644 --- a/src/gallia/analyzer/db_handler.py +++ b/src/gallia/analyzer/db_handler.py @@ -12,8 +12,8 @@ from pandas.io.sql import DatabaseError from gallia.analyzer.mode_config import LogMode from gallia.analyzer.name_config import ColNm -from gallia.utils import g_repr -from gallia.penlog import Logger +from gallia.services.uds.core.utils import g_repr +from gallia.log import get_logger class DatabaseHandler: @@ -22,12 +22,12 @@ class DatabaseHandler: Used for database connection, reading and writing data and log. 
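(Aside: as the class docstring below says, everything funnels through one sqlite3 connection: executescript plus commit for writes, and pandas read_sql_query for reads into data frames. A self-contained sketch of that access pattern; the table and columns are invented for the example, and an in-memory database stands in for the scan database file:

    import sqlite3
    import pandas as pd

    con = sqlite3.connect(":memory:")  # the analyzer opens the scan DB file here
    cur = con.cursor()
    cur.executescript('CREATE TABLE IF NOT EXISTS "serv" ("run" INT, "fail" INT);')
    con.commit()
    df = pd.read_sql_query('SELECT * FROM "serv" WHERE "run" = 1;', con)
    print(df.shape)
)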
""" - def __init__(self, path: str = "", log_mode: LogMode = LogMode.STD_OUT) -> None: + def __init__(self, path: str = "", mode: LogMode = LogMode.STD_OUT) -> None: self.set_db_path(path) - self.log_mode = log_mode + self.mode = mode self.con: sqlite3.Connection self.cur: sqlite3.Cursor - self.logger: Logger = Logger("Analyzer") + self.logger = get_logger(__file__) self.connect_db() def set_db_path(self, path: str = "") -> bool: @@ -49,7 +49,7 @@ def connect_db(self) -> bool: self.con = sqlite3.connect(self.db_path) self.cur = self.con.cursor() except OperationalError as exc: - self.logger.log_error(f"DB connection failed: {g_repr(exc)}") + self.logger.error(f"DB connection failed: {g_repr(exc)}") return False return True @@ -74,7 +74,7 @@ def create_table( self.cur.executescript(create_sql) self.con.commit() except (OperationalError, AttributeError) as exc: - self.logger.log_error(f"DB creating table failed: {g_repr(exc)}") + self.logger.error(f"DB creating table failed: {g_repr(exc)}") return False return True @@ -86,7 +86,7 @@ def clear_table(self, table_name: str) -> bool: self.cur.execute(f"DELETE FROM {table_name}") self.con.commit() except (OperationalError, AttributeError) as exc: - self.logger.log_error(f"DB clearing table failed: {g_repr(exc)}") + self.logger.error(f"DB clearing table failed: {g_repr(exc)}") return False return True @@ -98,7 +98,7 @@ def delete_table(self, table_name: str) -> bool: self.cur.execute(f"DROP TABLE IF EXISTS {table_name}") self.con.commit() except (OperationalError, AttributeError) as exc: - self.logger.log_error(f"DB deleting table failed: {g_repr(exc)}") + self.logger.error(f"DB deleting table failed: {g_repr(exc)}") return False return True @@ -110,11 +110,11 @@ def get_df_by_query(self, sql: str, error_on: bool = True) -> pd.DataFrame: raw_df: pd.DataFrame = pd.read_sql_query(sql, self.con) except (DatabaseError, AttributeError) as exc: if error_on: - self.logger.log_error(f"DB query failed: {g_repr(exc)}") + self.logger.error(f"DB query failed: {g_repr(exc)}") return pd.DataFrame() if raw_df.shape[0] == 0: if error_on: - self.logger.log_warning("no entry in database.") + self.logger.warning("no entry in database.") return pd.DataFrame() return raw_df @@ -152,7 +152,7 @@ def delete_run_db(self, table_name: str, run: int) -> bool: self.cur.executescript(del_sql) self.con.commit() except (OperationalError, AttributeError) as exc: - self.logger.log_error(f"deleting a run from DB failed: {g_repr(exc)}") + self.logger.error(f"deleting a run from DB failed: {g_repr(exc)}") return False return True @@ -163,6 +163,6 @@ def write_db(self, raw_df: pd.DataFrame, table_name: str) -> bool: try: raw_df.to_sql(table_name, self.con, if_exists="append", index=False) except (OperationalError, AttributeError) as exc: - self.logger.log_error(f"writing data to DB failed: {g_repr(exc)}") + self.logger.error(f"writing data to DB failed: {g_repr(exc)}") return False return True diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index 221f5848f..06bb9ffd4 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -12,7 +12,7 @@ from gallia.analyzer.config import TblStruct from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import TblNm, ColNm, VwNm -from gallia.utils import g_repr +from gallia.services.uds.core.utils import g_repr class Extractor(Operator): @@ -41,7 +41,7 @@ def extract_each_run(self, run: int) -> bool: extract scan result data from JSON form in the 
database and save it into relational tables for a certain input run. """ - self.logger.log_summary(f"extracting run #{str(run)} from {self.db_path} ...") + self.logger.result(f"extracting run #{str(run)} from {self.db_path} ...") self.check_boot(run) scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: @@ -100,7 +100,7 @@ def extract_serv(self, run: int) -> bool: self.cur.executescript(extract_sql) self.con.commit() except OperationalError as exc: - self.logger.log_error(f"extracting scan_service failed: {g_repr(exc)}") + self.logger.error(f"extracting scan_service failed: {g_repr(exc)}") return False return True @@ -170,7 +170,7 @@ def extract_iden(self, run: int) -> bool: self.cur.executescript(extract_sql) self.con.commit() except OperationalError as exc: - self.logger.log_error(f"extracting scan_identifier failed: {g_repr(exc)}") + self.logger.error(f"extracting scan_identifier failed: {g_repr(exc)}") return False return True @@ -188,8 +188,8 @@ def check_boot(self, run: int) -> bool: boot_df[ColNm.boot].apply(lambda x: x in boot_types_vec).all() ) if not boot_ok: - self.logger.log_warning("boot information not complete") + self.logger.warning("boot information not complete") except (KeyError, AttributeError, OperationalError) as exc: - self.logger.log_error(f"checking boot information failed: {g_repr(exc)}") + self.logger.error(f"checking boot information failed: {g_repr(exc)}") return False return boot_ok diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 912432d9d..ccd264e87 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -29,7 +29,10 @@ ANALYZER_AVAILABLE = False from gallia.analyzer.arg_help import ArgHelp -from gallia.udscan.core import Script +from gallia.command.base import Script +from gallia.log import get_logger +from argparse import ArgumentParser +from gallia.config import Config # ========================================================== # # [Rule for arguments] @@ -42,9 +45,16 @@ class AnalyzerMain(Script): - def __init__(self) -> None: - super().__init__() + """Analyzer""" + + GROUP = "analyzer" + COMMAND = "run" + SHORT_HELP = "request VIN" + + def __init__(self, parser: ArgumentParser, config: Config = Config()) -> None: + super().__init__(parser, config) self.artifacts_dir: Path + self.logger = get_logger(__file__) def prepare_artifactsdir(self, path: Optional[Path]) -> Path: if path is None: @@ -58,10 +68,10 @@ def prepare_artifactsdir(self, path: Optional[Path]) -> Path: if path.is_dir(): return path - self.logger.log_error(f"Data directory {path} is not an existing directory.") + self.logger.error(f"Data directory {path} is not an existing directory.") sys.exit(1) - def add_parser(self) -> None: + def configure_parser(self) -> None: # Commands grp_cmd = self.parser.add_argument_group("Command") grp_cmd.add_argument("-a", action="store_true", help=ArgHelp.analyze) @@ -97,13 +107,13 @@ def add_parser(self) -> None: def main(self, args: Namespace) -> None: if not ANALYZER_AVAILABLE: - self.logger.log_error( + self.logger.error( "Please install optional dependencies to run the analyzer" ) sys.exit(1) self.artifacts_dir = self.prepare_artifactsdir(args.data_dir) - self.logger.log_preamble(f"Storing artifacts at {self.artifacts_dir}") + self.logger.result(f"Storing artifacts at {self.artifacts_dir}") args = vars(args) # Commands @@ -134,7 +144,7 @@ def main(self, args: Namespace) -> None: run_end = run_start + 1 if db_path == "": - self.logger.log_error("Please set database path with --source 
option!") + self.logger.error("Please set database path with --source option!") sys.exit() start_time = time.process_time() @@ -196,10 +206,10 @@ def main(self, args: Namespace) -> None: reporter.iterate_all(show_possible_on) else: if service_id == -1: - self.logger.log_error("Please input Service ID with --sid option.") + self.logger.error("Please input Service ID with --sid option.") else: reporter.consolidate_xl_iden(service_id, show_possible_on) - self.logger.log_summary( + self.logger.result( f"gallia-analyze: elapsed time(sec): {str(time.process_time() - start_time)}" ) diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index 4e882d7b1..7da578ec9 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -21,8 +21,8 @@ from gallia.analyzer.mode_config import LogMode, ScanMode, OpMode from gallia.analyzer.exceptions import EmptyTableException, ColumnMismatchException from gallia.analyzer.constants import UDSIsoSessions -from gallia.uds.core.constants import UDSErrorCodes, UDSIsoServices -from gallia.utils import g_repr +from gallia.services.uds.core.constants import UDSErrorCodes, UDSIsoServices +from gallia.services.uds.core.utils import g_repr class Operator(DatabaseHandler): @@ -78,7 +78,7 @@ def get_scan_mode(self, run: int) -> ScanMode: if scan_mode_str == "scan-identifiers": return ScanMode.IDEN except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error(f"getting scan mode failed: {g_repr(exc)}") + self.logger.error(f"getting scan mode failed: {g_repr(exc)}") return ScanMode.UNKNOWN return ScanMode.UNKNOWN @@ -88,16 +88,16 @@ def get_sid(self, run: int) -> int: """ try: if self.get_scan_mode(run) != ScanMode.IDEN: - self.logger.log_error("scan mode is not scan_identifier") + self.logger.error("scan mode is not scan_identifier") return -1 raw_df = self.read_run_db(TblNm.iden, run) self.check_df(raw_df, TblStruct.iden) serv_vec = np.unique(raw_df[ColNm.serv]) if serv_vec.shape[0] > 1: - self.logger.log_warning("A run has more than one Service ID") + self.logger.warning("A run has more than one Service ID") serv_ser = raw_df[ColNm.serv].mode(dropna=True) if serv_ser.shape[0] > 1: - self.logger.log_warning( + self.logger.warning( "A run has more than one most frequent Service ID" ) except ( @@ -107,7 +107,7 @@ def get_sid(self, run: int) -> int: EmptyTableException, ColumnMismatchException, ) as exc: - self.logger.log_error(f"getting Service ID failed: {g_repr(exc)}") + self.logger.error(f"getting Service ID failed: {g_repr(exc)}") return -1 return serv_ser[0] @@ -127,7 +127,7 @@ def get_ecu_mode(self, run: int) -> int: ecu_mode = 0 return ecu_mode except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error(f"getting ECU mode failed: {g_repr(exc)}") + self.logger.error(f"getting ECU mode failed: {g_repr(exc)}") return -1 def get_op_mode(self, iso_on: bool) -> OpMode: @@ -155,7 +155,7 @@ def get_sess_lu(self) -> np.ndarray: EmptyTableException, ColumnMismatchException, ) as exc: - self.logger.log_error( + self.logger.error( f"getting sessions in lookup table failed: {g_repr(exc)}" ) return np.array([]) @@ -183,7 +183,7 @@ def get_ref_df_from_json(self, path: str) -> pd.DataFrame: FileNotFoundError, JSONDecodeError, ) as exc: - self.logger.log_error( + self.logger.error( f"getting reference summary from JSON failed: {g_repr(exc)}" ) return pd.DataFrame() @@ -208,7 +208,7 @@ def get_dft_err_df_from_raw(self, raw_df: pd.DataFrame) -> pd.DataFrame: EmptyTableException, 
ColumnMismatchException, ) as exc: - self.logger.log_error( + self.logger.error( f"getting default error data frame failed: {g_repr(exc)}" ) return pd.DataFrame() @@ -226,7 +226,7 @@ def get_pos_res(self, search_id: int) -> str: res_df = self.get_df_by_query(res_sql) resp = cast(str, res_df.iloc[0, 0]) except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error(f"getting positive response failed: {g_repr(exc)}") + self.logger.error(f"getting positive response failed: {g_repr(exc)}") return "" return resp @@ -261,12 +261,12 @@ def load_meta(self, force: bool = False) -> bool: self.cur.executescript(gen_meta_sql) meta_df = self.read_db(TblNm.meta) if meta_df.shape == (0, 0): - self.logger.log_error("no meta data") + self.logger.error("no meta data") return False meta_df.set_index("run_id", inplace=True) self.run_meta_df = meta_df except (KeyError, IndexingError, AttributeError, OperationalError) as exc: - self.logger.log_error( + self.logger.error( f"loading run meta data failed: {g_repr(exc)}", ) return False @@ -334,7 +334,7 @@ def load_ven_lu(self, force: bool = False, num_modes: int = NUM_ECU_MODES) -> bo EmptyTableException, ColumnMismatchException, ) as exc: - self.logger.log_error( + self.logger.error( f"loading vendor-specific reference failed: {g_repr(exc)}" ) return False @@ -353,7 +353,7 @@ def load_ref_iso(self, force: bool = False) -> bool: self.supp_serv_iso_vec = np.sort(np.array(ref_iso_df.index)) self.ref_iso_df: pd.DataFrame = ref_iso_df.sort_index() except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"loading reference summary for UDS ISO failed: {g_repr(exc)}" ) return False @@ -402,7 +402,7 @@ def load_ven_sess(self) -> bool: ) as exc: self.sess_name_dict = self.iso_sess_name_dict self.sess_code_dict = self.iso_sess_code_dict - self.logger.log_error( + self.logger.error( f"loading vendor-specific sessions failed: {g_repr(exc)}" ) return False @@ -442,7 +442,7 @@ def load_lu_iden(self, serv: int, ecu_mode: int) -> bool: OperationalError, ) as exc: self.lu_iden_df = pd.DataFrame() - self.logger.log_error( + self.logger.error( f"loading lookup for service 0x{serv:02x} failed: {g_repr(exc)}" ) return False @@ -453,10 +453,10 @@ def prepare_table(self) -> bool: prepare relational tables to save data for scan_service and scan_identifier. 
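Nearly every Operator method in this hunk shares one contract: build a SQL string, executescript it, commit, log any failure, and fold the outcome into a bool. A condensed, self-contained sketch of that pattern (run_script is an illustrative name, not one from the patch):

    import sqlite3

    def run_script(con: sqlite3.Connection, sql: str) -> bool:
        # executescript/commit wrapped into the analyzer's bool-return idiom
        try:
            con.executescript(sql)
            con.commit()
        except sqlite3.OperationalError as exc:
            print(f"executing script failed: {exc!r}")  # stands in for logger.error
            return False
        return True

    con = sqlite3.connect(":memory:")
    assert run_script(con, "CREATE TABLE t(x); INSERT INTO t VALUES (1);")
    assert not run_script(con, "CREATE TABLE t(x);")  # duplicate table -> False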
""" if not self.create_table(TblNm.serv, TblStruct.serv): - self.logger.log_error("preparing table for scan_service failed") + self.logger.error("preparing table for scan_service failed") return False if not self.create_table(TblNm.iden, TblStruct.iden): - self.logger.log_error("preparing table for scan_identifier failed") + self.logger.error("preparing table for scan_identifier failed") return False return True @@ -529,7 +529,7 @@ def prepare_alwd_sess_boot( EmptyTableException, ColumnMismatchException, ) as exc: - self.logger.log_error( + self.logger.error( f"preparing table for session and boot failed: {g_repr(exc)}" ) return False @@ -556,7 +556,7 @@ def prepare_alwd( pair_df = pd.DataFrame(pair_ls, columns=[ColNm.serv, col_name]) self.write_db(pair_df, table_name) except (KeyError, IndexError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"preparing table for availabilities failed: {g_repr(exc)}" ) return False diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index a9f93df30..5e025d03a 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -18,7 +18,7 @@ from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import ColNm, TblNm from gallia.analyzer.exceptions import ColumnMismatchException, EmptyTableException -from gallia.utils import g_repr +from gallia.services.uds.core.utils import g_repr class Reporter(Operator): @@ -52,7 +52,7 @@ def consolidate_xl_serv(self, show_psb: bool = False) -> bool: return False self.load_ven_sess() self.load_ven_lu() - self.logger.log_summary( + self.logger.result( f"consolidating scan_service by ECU mode from {self.db_path} ..." ) xl_generator = ExcelGenerator(self.db_path, self.log_mode) @@ -75,12 +75,12 @@ def consolidate_xl_serv(self, show_psb: bool = False) -> bool: ColumnMismatchException, AttributeError, ) as exc: - self.logger.log_error( + self.logger.error( f"consolidating scan_service failed: {g_repr(exc)}" ) continue except EmptyTableException: - self.logger.log_warning(f"nothing to report for ECU mode {ecu_mode}.") + self.logger.warning(f"nothing to report for ECU mode {ecu_mode}.") if xl_is_empty: return False out_path = self.get_path( @@ -96,20 +96,20 @@ def consolidate_xl_iden(self, serv: int, show_psb: bool = False) -> bool: for a certain given service into one EXCEL file. """ if serv not in self.iso_serv_by_iden_vec: - self.logger.log_error("given Service ID is not service by identifier.") + self.logger.error("given Service ID is not service by identifier.") return False if not self.load_meta(force=True): return False self.load_ven_sess() self.load_ven_lu() - self.logger.log_summary( + self.logger.result( f"consolidating for Service ID 0x{serv:02X} {self.iso_serv_code_dict[serv]} from {self.db_path} ..." ) xl_generator = ExcelGenerator(self.db_path, self.log_mode) xl_is_empty = True if self.num_modes == 0: num_modes = NUM_ECU_MODES - self.logger.log_warning( + self.logger.warning( f"no information about ECU modes. trying {NUM_ECU_MODES} mode(s)..." 
) else: @@ -132,14 +132,14 @@ def consolidate_xl_iden(self, serv: int, show_psb: bool = False) -> bool: ColumnMismatchException, AttributeError, ) as exc: - self.logger.log_error( + self.logger.error( f"consolidating scan_identifier failed: {g_repr(exc)}" ) continue except EmptyTableException: - self.logger.log_error(f"nothing to report for ECU mode {ecu_mode}.") + self.logger.error(f"nothing to report for ECU mode {ecu_mode}.") if xl_is_empty: - self.logger.log_info(f"nothing to report for Service ID 0x{serv:02X}") + self.logger.info(f"nothing to report for Service ID 0x{serv:02X}") return False out_path = self.get_path( f"0x{serv:02X}_{self.iso_serv_code_dict[serv]}", @@ -170,7 +170,7 @@ def report_xl_each_run(self, run: int, show_psb: bool = False) -> bool: """ generate EXCEL report for a certain run. """ - self.logger.log_summary(f"reporting run #{str(run)} from {self.db_path} ...") + self.logger.result(f"reporting run #{str(run)} from {self.db_path} ...") scan_mode = self.get_scan_mode(run) if scan_mode == ScanMode.SERV: return self.report_xl_serv(run, show_psb) @@ -196,7 +196,7 @@ def report_xl_serv(self, run: int, show_psb: bool = False) -> bool: if not xl_generator.save_close_xl(out_path): return False except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: - self.logger.log_error(f"reporting scan_service failed: {g_repr(exc)}") + self.logger.error(f"reporting scan_service failed: {g_repr(exc)}") return False return True @@ -218,7 +218,7 @@ def report_xl_iden(self, run: int, show_psb: bool = False) -> bool: if not xl_generator.save_close_xl(out_path): return False except (EmptyTableException, ColumnMismatchException, AttributeError) as exc: - self.logger.log_error(f"reporting scan_identifier failed: {g_repr(exc)}") + self.logger.error(f"reporting scan_identifier failed: {g_repr(exc)}") return False return True @@ -232,7 +232,7 @@ def get_path( out_path = self.artifacts_dir.joinpath(f"{suffix}{ext}") if out_path.is_file() and rm_if_exists: os.remove(out_path) - self.logger.log_info(f"existing file removed from {out_path}") + self.logger.info(f"existing file removed from {out_path}") return str(out_path) def get_entries_oi(self, scan_mode: ScanMode, show_psb: bool = False) -> np.ndarray: @@ -261,7 +261,7 @@ def load_sid_oi(self, run: int, ecu_mode: int = -1) -> bool: if not self.load_sid_oi_from_df(raw_df, ecu_mode): return False except (EmptyTableException, ColumnMismatchException) as exc: - self.logger.log_error(f"loading services of interest failed: {g_repr(exc)}") + self.logger.error(f"loading services of interest failed: {g_repr(exc)}") return False return True @@ -289,7 +289,7 @@ def load_sid_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool: cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode self.abn_serv_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.serv])) except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"loading services of interest from data frame failed: {g_repr(exc)}" ) return False @@ -305,7 +305,7 @@ def load_iden_oi(self, run: int, ecu_mode: int = -1) -> bool: if not self.load_iden_oi_from_df(raw_df, ecu_mode): return False except (EmptyTableException, ColumnMismatchException) as exc: - self.logger.log_error( + self.logger.error( f"loading identifiers of interest failed: {g_repr(exc)}" ) return False @@ -318,7 +318,7 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool try: serv_vec = np.sort(np.unique(raw_df[ColNm.serv])) if not serv_vec.size 
== 1: - self.logger.log_error("more than one service in a run") + self.logger.error("more than one service in a run") return False dft_err_df = self.get_dft_err_df_from_raw(raw_df) dft_err_ser: pd.Series = dft_err_df.loc[ColNm.dft] @@ -338,7 +338,7 @@ def load_iden_oi_from_df(self, raw_df: pd.DataFrame, ecu_mode: int = -1) -> bool cond_abn &= raw_df[ColNm.ecu_mode] == ecu_mode self.abn_iden_vec = np.sort(np.unique(raw_df.loc[cond_abn, ColNm.iden])) except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"loading identifiers of interest from data frame failed: {g_repr(exc)}" ) return False diff --git a/src/gallia/analyzer/time_analyzer.py b/src/gallia/analyzer/time_analyzer.py index d5a3c8462..4995ea531 100644 --- a/src/gallia/analyzer/time_analyzer.py +++ b/src/gallia/analyzer/time_analyzer.py @@ -17,7 +17,7 @@ from gallia.analyzer.config import PltDesign, TblStruct, SrcPath, DFT_T_PREC from gallia.analyzer.mode_config import ScanMode, LogMode from gallia.analyzer.name_config import ColNm, TblNm, KyNm -from gallia.utils import g_repr +from gallia.services.uds.core.utils import g_repr class TimeAnalyzer(Reporter): @@ -49,7 +49,7 @@ def extract_tra_each_run(self, run: int) -> bool: """ extract reaction times of each run. """ - self.logger.log_summary( + self.logger.result( f"extracting time for run #{str(run)} from {self.db_path} ..." ) scan_mode = self.get_scan_mode(run) @@ -91,7 +91,7 @@ def extract_tra_each_run(self, run: int) -> bool: EmptyTableException, ColumnMismatchException, ) as exc: - self.logger.log_error( + self.logger.error( f"extracting reaction time for run #{run} failed: {g_repr(exc)}" ) return False @@ -109,7 +109,7 @@ def plot_tra_each_run(self, run: int) -> bool: """ plot reaction time for each run. """ - self.logger.log_summary( + self.logger.result( f"plotting reaction time for run #{str(run)} from {self.db_path} ..." ) scan_mode = self.get_scan_mode(run) @@ -152,7 +152,7 @@ def plot_tra_serv(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.logger.log_error( + self.logger.error( f"plotting service ID and reaction time in run #{run} failed: {g_repr(exc)}" ) return False @@ -191,7 +191,7 @@ def plot_tra_iden(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.logger.log_error( + self.logger.error( f"plotting identifier and reaction time in run #{run} failed: {g_repr(exc)}" ) return False @@ -209,7 +209,7 @@ def hist_tra_each_run(self, run: int) -> bool: """ create a histogram of reaction time for a given run. """ - self.logger.log_summary( + self.logger.result( f"creating a histogram for run #{str(run)} from {self.db_path} ..." 
) try: @@ -227,7 +227,7 @@ def hist_tra_each_run(self, run: int) -> bool: plt.cla() plt.close() except (KeyError, IndexingError, AttributeError, FileNotFoundError) as exc: - self.logger.log_error( + self.logger.error( f"establishing histogram of identifiers in run #{run} failed: {g_repr(exc)}" ) return False diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index 63afe9f54..ce5591452 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -26,7 +26,7 @@ from gallia.analyzer.failure import Failure from gallia.analyzer.mode_config import LogMode, ScanMode from gallia.analyzer.name_config import ColNm, ShtNm, CellCnt, KyNm -from gallia.utils import g_repr +from gallia.services.uds.core.utils import g_repr class ExcelGenerator(Operator): @@ -70,7 +70,7 @@ def write_xl( if len(self.workbook.worksheets) == 0: self.workbook.create_sheet(ShtNm.init) except (SheetTitleException, ReadOnlyWorkbookException) as exc: - self.logger.log_error(f"generating EXCEL failed: {g_repr(exc)}") + self.logger.error(f"generating EXCEL failed: {g_repr(exc)}") return False return True @@ -79,7 +79,7 @@ def save_close_xl(self, out_path: str) -> bool: self.workbook.save(out_path) self.workbook.close() except (InvalidFileException, WorkbookAlreadySaved) as exc: - self.logger.log_error(f"saving EXCEL failed: {g_repr(exc)}") + self.logger.error(f"saving EXCEL failed: {g_repr(exc)}") return False return True @@ -113,7 +113,7 @@ def add_sum_sheet_serv( ScanMode.SERV, ) except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: - self.logger.log_error(f"adding summary sheet failed: {g_repr(exc)}") + self.logger.error(f"adding summary sheet failed: {g_repr(exc)}") return False return True @@ -149,7 +149,7 @@ def add_sum_sheet_iden( ScanMode.IDEN, ) except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: - self.logger.log_error(f"adding summary sheet failed: {g_repr(exc)}") + self.logger.error(f"adding summary sheet failed: {g_repr(exc)}") return False return True @@ -188,7 +188,7 @@ def sum_sheet_fill_origin( self.start_row + 1, self.start_col + 1 ).coordinate except (KeyError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"filling origin cell of summary sheet failed: {g_repr(exc)}" ) return self.start_row, self.start_col @@ -242,7 +242,7 @@ def sum_sheet_fill_index( name=XlDesign.font_index ) except (KeyError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"filling index of summary sheet failed: {g_repr(exc)}" ) return self.start_row, self.start_col + 1 @@ -285,7 +285,7 @@ def sum_sheet_fill_sess( cur_col -= sess_num cur_row = self.start_row + 2 except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"filling top session row of summary sheet failed: {g_repr(exc)}" ) return self.start_row + 1, self.start_col + 1 @@ -344,7 +344,7 @@ def sum_sheet_fill_resp( cur_col += 1 cur_row = self.start_row + 2 except (KeyError, IndexingError, AttributeError) as exc: - self.logger.log_error( + self.logger.error( f"filling response field of summary sheet failed: {g_repr(exc)}" ) return self.start_row + 1, self.start_col + 1 @@ -357,7 +357,7 @@ def add_failure_sheet( add failure(undocumented or missing) sheet to report EXCEL file. 
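The ExcelGenerator helpers patched below (set_cell_width, set_cell_height, fill_cell) wrap three openpyxl idioms; a minimal standalone sketch of those calls (file name and ARGB color are placeholders):

    from openpyxl import Workbook
    from openpyxl.styles import PatternFill
    from openpyxl.utils import get_column_letter

    wb = Workbook()
    ws = wb.active
    ws.column_dimensions[get_column_letter(2)].width = 24   # cf. set_cell_width
    ws.row_dimensions[3].height = 18                        # cf. set_cell_height
    ws.cell(3, 2).fill = PatternFill(                       # cf. fill_cell
        start_color="FFCC0000", end_color="FFCC0000", fill_type="solid"
    )
    wb.save("report.xlsx")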
""" if scan_mode == ScanMode.UNKNOWN: - self.logger.log_error("adding summary sheet failed: scan mode unknown.") + self.logger.error("adding summary sheet failed: scan mode unknown.") return False try: dft_err_df = self.get_dft_err_df_from_raw(raw_df) @@ -382,7 +382,7 @@ def add_failure_sheet( ) self.worksheet.freeze_panes = self.worksheet.cell( self.start_row + 1, self.start_col - ).coordinate # type: ignore # This seems like an error in the type hints + ).coordinate for sess in sess_vec: self.set_cell_width(cur_col, width) self.worksheet.cell(cur_row, cur_col).value = self.get_code_text( @@ -460,7 +460,7 @@ def add_failure_sheet( cur_row = self.start_row cur_col = self.start_col except (KeyError, IndexingError, AttributeError, SheetTitleException) as exc: - self.logger.log_error( + self.logger.error( f"adding failure summary sheets failed: {g_repr(exc)}" ) return False @@ -478,7 +478,7 @@ def load_color_code(self, path: str) -> bool: for color_code in color_code_ls } except (FileNotFoundError, KeyError, JSONDecodeError) as exc: - self.logger.log_error(f"loading color codes failed: {g_repr(exc)}") + self.logger.error(f"loading color codes failed: {g_repr(exc)}") return False return True @@ -489,7 +489,7 @@ def set_cell_width(self, col: int, width: int) -> bool: try: self.worksheet.column_dimensions[get_column_letter(col)].width = width except (KeyError, AttributeError) as exc: - self.logger.log_error(f"setting cell width failed: {g_repr(exc)}") + self.logger.error(f"setting cell width failed: {g_repr(exc)}") return False return True @@ -500,7 +500,7 @@ def set_cell_height(self, row: int, height: int) -> bool: try: self.worksheet.row_dimensions[row].height = height except (KeyError, AttributeError) as exc: - self.logger.log_error(f"setting cell height failed: {g_repr(exc)}") + self.logger.error(f"setting cell height failed: {g_repr(exc)}") return False return True @@ -515,7 +515,7 @@ def fill_cell(self, row: int, col: int, error: int) -> bool: fill_type="solid", ) except (KeyError, AttributeError) as exc: - self.logger.log_error(f"filling cell failed: {g_repr(exc)}") + self.logger.error(f"filling cell failed: {g_repr(exc)}") return False return True diff --git a/src/gallia/cli.py b/src/gallia/cli.py index 5c38b009f..0935358e2 100644 --- a/src/gallia/cli.py +++ b/src/gallia/cli.py @@ -156,6 +156,12 @@ def load_parsers() -> Parsers: description="miscellaneous uncategorized helper scripts", metavar=command, ) + add_cli_group( + parsers, + "analyzer", + "analyzer", + metavar=subgroup, + ) return parsers diff --git a/src/gallia/commands/__init__.py b/src/gallia/commands/__init__.py index 82dc3ad69..41fd04f8e 100644 --- a/src/gallia/commands/__init__.py +++ b/src/gallia/commands/__init__.py @@ -24,6 +24,7 @@ from gallia.commands.scan.uds.sa_dump_seeds import SASeedsDumper from gallia.commands.scan.uds.services import ServicesScanner from gallia.commands.scan.uds.sessions import SessionsScanner +from gallia.analyzer.main import AnalyzerMain registry: list[type[BaseCommand]] = [ DoIPDiscoverer, @@ -45,6 +46,7 @@ SendPDUPrimitive, WMBAPrimitive, WriteByIdentifierPrimitive, + AnalyzerMain ] __all__ = [ @@ -67,6 +69,7 @@ "DSendPDUPrimitive", "DWMBAPrimitive", "DWriteByIdentifierPrimitive", + "AnalyzerMain", ] From 366028b5f3203d8fcff1f75cb762b2e638007c7b Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Wed, 24 Jul 2024 14:16:11 +0200 Subject: [PATCH 18/26] Fix scanner command/mode detection --- src/gallia/analyzer/db_handler.py | 4 ++-- src/gallia/analyzer/main.py | 12 +++++++----- 
src/gallia/analyzer/operator.py | 10 ++++++---- src/gallia/analyzer/reporter.py | 6 ++++-- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/gallia/analyzer/db_handler.py b/src/gallia/analyzer/db_handler.py index 087fe91c9..1744cac18 100644 --- a/src/gallia/analyzer/db_handler.py +++ b/src/gallia/analyzer/db_handler.py @@ -24,10 +24,10 @@ class DatabaseHandler: def __init__(self, path: str = "", mode: LogMode = LogMode.STD_OUT) -> None: self.set_db_path(path) - self.mode = mode + self.log_mode = mode self.con: sqlite3.Connection self.cur: sqlite3.Cursor - self.logger = get_logger(__file__) + self.logger = get_logger(__package__) self.connect_db() def set_db_path(self, path: str = "") -> bool: diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index ccd264e87..851e2e5a7 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -31,6 +31,7 @@ from gallia.analyzer.arg_help import ArgHelp from gallia.command.base import Script from gallia.log import get_logger +from gallia.utils import auto_int from argparse import ArgumentParser from gallia.config import Config @@ -54,7 +55,7 @@ class AnalyzerMain(Script): def __init__(self, parser: ArgumentParser, config: Config = Config()) -> None: super().__init__(parser, config) self.artifacts_dir: Path - self.logger = get_logger(__file__) + self.logger = get_logger(__package__) def prepare_artifactsdir(self, path: Optional[Path]) -> Path: if path is None: @@ -93,9 +94,9 @@ def configure_parser(self) -> None: # Parameters grp_param = self.parser.add_argument_group("Parameter") - grp_param.add_argument("--sid", type=int, help=ArgHelp.sid, default=-1) - grp_param.add_argument("--from", type=int, help=ArgHelp.first, default=0) - grp_param.add_argument("--to", type=int, help=ArgHelp.last, default=0) + grp_param.add_argument("--sid", type=auto_int, help=ArgHelp.sid, default=-1) + grp_param.add_argument("--from", type=auto_int, help=ArgHelp.first, default=0) + grp_param.add_argument("--to", type=auto_int, help=ArgHelp.last, default=0) grp_param.add_argument("--source", type=str, help=ArgHelp.source, default="") grp_param.add_argument("--precision", type=int, help=ArgHelp.prec, default=0) grp_param.add_argument( @@ -196,7 +197,8 @@ def main(self, args: Namespace) -> None: reporter = Reporter(db_path, self.artifacts_dir, log_mode) if report_on: - reporter.report_xl(runs_vec, show_possible_on) + res = reporter.report_xl(runs_vec, show_possible_on) + self.logger.result(f'Report result: {res}') if aio_service_on: reporter.consolidate_xl_serv(show_possible_on) diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index 7da578ec9..ce318237f 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -73,14 +73,16 @@ def get_scan_mode(self, run: int) -> ScanMode: return ScanMode.UNKNOWN try: scan_mode_str = self.run_meta_df.loc[run, ColNm.scan_mode] - if scan_mode_str == "scan-services": + if scan_mode_str == "scan-uds-services": return ScanMode.SERV - if scan_mode_str == "scan-identifiers": + if scan_mode_str == "scan-uds-identifiers": return ScanMode.IDEN + else: + self.logger.error(f"Unknown scan mode: {scan_mode_str}") + return ScanMode.UNKNOWN except (KeyError, IndexingError, AttributeError) as exc: self.logger.error(f"getting scan mode failed: {g_repr(exc)}") return ScanMode.UNKNOWN - return ScanMode.UNKNOWN def get_sid(self, run: int) -> int: """ @@ -246,7 +248,7 @@ def load_meta(self, force: bool = False) -> bool: FROM "{TblNm.scan_run}"; CREATE VIEW 
"{VwNm.mode_vw}" AS SELECT "{ColNm.id}" AS "{ColNm.run_id}", - "script" AS "{ColNm.scan_mode}" + json_extract("command_meta", "$.group") || "-" || json_extract("command_meta", "$.subgroup") || "-" || json_extract("command_meta", "$.command") AS "{ColNm.scan_mode}" FROM "{TblNm.run_meta}"; DROP TABLE IF EXISTS "{TblNm.meta}"; CREATE TABLE "{TblNm.meta}" diff --git a/src/gallia/analyzer/reporter.py b/src/gallia/analyzer/reporter.py index 5e025d03a..f8c67586c 100644 --- a/src/gallia/analyzer/reporter.py +++ b/src/gallia/analyzer/reporter.py @@ -162,9 +162,10 @@ def report_xl( return False self.load_ven_sess() self.load_ven_lu() + res = True for run in runs_vec: - self.report_xl_each_run(run, show_psb) - return True + res = res & self.report_xl_each_run(run, show_psb) + return res def report_xl_each_run(self, run: int, show_psb: bool = False) -> bool: """ @@ -176,6 +177,7 @@ def report_xl_each_run(self, run: int, show_psb: bool = False) -> bool: return self.report_xl_serv(run, show_psb) if scan_mode == ScanMode.IDEN: return self.report_xl_iden(run, show_psb) + self.logger.error(f"Unknown scan mode: {scan_mode}") return False def report_xl_serv(self, run: int, show_psb: bool = False) -> bool: From 5b991df542595581f069f203d7397a474e2e3ccc Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Wed, 24 Jul 2024 14:47:21 +0200 Subject: [PATCH 19/26] Use BaseCommand as base class to get artifacts_dir --- src/gallia/analyzer/main.py | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/src/gallia/analyzer/main.py b/src/gallia/analyzer/main.py index 851e2e5a7..92b5bfee3 100755 --- a/src/gallia/analyzer/main.py +++ b/src/gallia/analyzer/main.py @@ -29,7 +29,7 @@ ANALYZER_AVAILABLE = False from gallia.analyzer.arg_help import ArgHelp -from gallia.command.base import Script +from gallia.command.base import BaseCommand from gallia.log import get_logger from gallia.utils import auto_int from argparse import ArgumentParser @@ -45,33 +45,19 @@ # ========================================================== # -class AnalyzerMain(Script): +class AnalyzerMain(BaseCommand): """Analyzer""" GROUP = "analyzer" COMMAND = "run" SHORT_HELP = "request VIN" + HAS_ARTIFACTS_DIR = True + def __init__(self, parser: ArgumentParser, config: Config = Config()) -> None: super().__init__(parser, config) - self.artifacts_dir: Path self.logger = get_logger(__package__) - def prepare_artifactsdir(self, path: Optional[Path]) -> Path: - if path is None: - base = Path(gettempdir()) - p = base.joinpath( - f'{self.id}_{time.strftime("%Y%m%d-%H%M%S")}_{token_urlsafe(6)}' - ) - p.mkdir(parents=True) - return p - - if path.is_dir(): - return path - - self.logger.error(f"Data directory {path} is not an existing directory.") - sys.exit(1) - def configure_parser(self) -> None: # Commands grp_cmd = self.parser.add_argument_group("Command") @@ -106,16 +92,13 @@ def configure_parser(self) -> None: help="Folder for artifacts", ) - def main(self, args: Namespace) -> None: + def run(self, args: Namespace) -> None: if not ANALYZER_AVAILABLE: self.logger.error( "Please install optional dependencies to run the analyzer" ) sys.exit(1) - self.artifacts_dir = self.prepare_artifactsdir(args.data_dir) - self.logger.result(f"Storing artifacts at {self.artifacts_dir}") - args = vars(args) # Commands analyze_on = args["a"] From 188f2e88ad6bbfdaebbd94deae09f1606bd4b611 Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Mon, 29 Jul 2024 08:14:42 +0200 Subject: [PATCH 20/26] Fix run_id and run_meta_id confusion --- 
src/gallia/analyzer/name_config.py | 1 + src/gallia/analyzer/operator.py | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/gallia/analyzer/name_config.py b/src/gallia/analyzer/name_config.py index 354ea682d..a965cc059 100644 --- a/src/gallia/analyzer/name_config.py +++ b/src/gallia/analyzer/name_config.py @@ -34,6 +34,7 @@ class for colunm names in relational tables run = "run" run_id = "run_id" + run_meta_id = "run_meta_id" index = "index" sess = "session" sess_name = "session_name" diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index ce318237f..d1ff00cdb 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -244,10 +244,10 @@ def load_meta(self, force: bool = False) -> bool: DROP VIEW IF EXISTS "{VwNm.ecu_vw}"; DROP VIEW IF EXISTS "{VwNm.mode_vw}"; CREATE VIEW "{VwNm.ecu_vw}" - AS SELECT "{ColNm.id}", json_extract("properties_pre", "$.mode") AS "{ColNm.ecu_mode}" + AS SELECT "{ColNm.id}" as {ColNm.run_id}, meta as {ColNm.run_meta_id}, json_extract("properties_pre", "$.mode") AS "{ColNm.ecu_mode}" FROM "{TblNm.scan_run}"; CREATE VIEW "{VwNm.mode_vw}" - AS SELECT "{ColNm.id}" AS "{ColNm.run_id}", + AS SELECT "{ColNm.id}" AS "{ColNm.run_meta_id}", json_extract("command_meta", "$.group") || "-" || json_extract("command_meta", "$.subgroup") || "-" || json_extract("command_meta", "$.command") AS "{ColNm.scan_mode}" FROM "{TblNm.run_meta}"; DROP TABLE IF EXISTS "{TblNm.meta}"; @@ -255,9 +255,7 @@ def load_meta(self, force: bool = False) -> bool: AS SELECT "{ColNm.run_id}", "{ColNm.ecu_mode}", "{ColNm.scan_mode}" FROM "{VwNm.ecu_vw}" INNER JOIN "{VwNm.mode_vw}" - ON "{VwNm.ecu_vw}"."{ColNm.id}" = "{VwNm.mode_vw}"."{ColNm.run_id}"; - DROP VIEW IF EXISTS "{VwNm.ecu_vw}"; - DROP VIEW IF EXISTS "{VwNm.mode_vw}"; + ON "{VwNm.ecu_vw}"."{ColNm.run_meta_id}" = "{VwNm.mode_vw}"."{ColNm.run_meta_id}"; """ try: self.cur.executescript(gen_meta_sql) From fb57f02a6df3d8469bd3452a29b69b8322f3ec7e Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Mon, 29 Jul 2024 08:17:32 +0200 Subject: [PATCH 21/26] only analyze successful scan runs --- src/gallia/analyzer/operator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index d1ff00cdb..eb0c4503c 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -249,7 +249,8 @@ def load_meta(self, force: bool = False) -> bool: CREATE VIEW "{VwNm.mode_vw}" AS SELECT "{ColNm.id}" AS "{ColNm.run_meta_id}", json_extract("command_meta", "$.group") || "-" || json_extract("command_meta", "$.subgroup") || "-" || json_extract("command_meta", "$.command") AS "{ColNm.scan_mode}" - FROM "{TblNm.run_meta}"; + FROM "{TblNm.run_meta}" + WHERE exit_code = 0; DROP TABLE IF EXISTS "{TblNm.meta}"; CREATE TABLE "{TblNm.meta}" AS SELECT "{ColNm.run_id}", "{ColNm.ecu_mode}", "{ColNm.scan_mode}" From 3ed196d5a1b645ec1e4c7fb385ffe0b51e07fd17 Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Mon, 29 Jul 2024 08:48:28 +0200 Subject: [PATCH 22/26] fix xl reporter for having id and sub_func --- src/gallia/analyzer/xl_generator.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index ce5591452..b8a9a993e 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -230,13 +230,12 @@ def sum_sheet_fill_index( else: index_name = f"0x{int(row.identifier):04X}" 
self.worksheet.cell(cur_row, self.start_col).value = index_name - cur_row += 1 if row.subfunc != -1: # service has subfunction and identifier self.worksheet.cell( cur_row, self.start_col + 1 ).value = row.subfunc - cur_row += 1 + cur_row += 1 self.worksheet.cell(cur_row, cur_col).font = Font( name=XlDesign.font_index From 11a0fe6a47efdcfd1432b0de3a62c0ab9bb54812 Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Mon, 29 Jul 2024 09:21:06 +0200 Subject: [PATCH 23/26] Add support for scan reset --- src/gallia/analyzer/extractor.py | 2 ++ src/gallia/analyzer/operator.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index 06bb9ffd4..b1974089b 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -133,6 +133,8 @@ def extract_iden(self, run: int) -> bool: AND json_extract("request_data", '$.sub_function') IS NULL THEN -1 WHEN json_extract("request_data", '$.service_id') = 39 THEN json_extract("request_data", '$.sub_function') + WHEN json_extract("request_data", '$.service_id') = 0x11 + THEN json_extract("request_data", '$.sub_function') WHEN json_extract("request_data", '$.data_identifier') IS NULL THEN json_extract("request_data", '$.data_identifiers[0]') ELSE json_extract("request_data", '$.data_identifier') diff --git a/src/gallia/analyzer/operator.py b/src/gallia/analyzer/operator.py index eb0c4503c..1a6d83fb7 100644 --- a/src/gallia/analyzer/operator.py +++ b/src/gallia/analyzer/operator.py @@ -75,7 +75,7 @@ def get_scan_mode(self, run: int) -> ScanMode: scan_mode_str = self.run_meta_df.loc[run, ColNm.scan_mode] if scan_mode_str == "scan-uds-services": return ScanMode.SERV - if scan_mode_str == "scan-uds-identifiers": + if scan_mode_str in ["scan-uds-identifiers", "scan-uds-reset"]: return ScanMode.IDEN else: self.logger.error(f"Unknown scan mode: {scan_mode_str}") From f3331a566dd6324778c970bca658c9a8df02091d Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Wed, 2 Oct 2024 17:39:45 +0200 Subject: [PATCH 24/26] add hot fix for IndexError --- src/gallia/analyzer/xl_generator.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index b8a9a993e..d7879d263 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -148,7 +148,7 @@ def add_sum_sheet_iden( entries_vec, ScanMode.IDEN, ) - except (KeyError, IndexError, AttributeError, SheetTitleException) as exc: + except (KeyError, AttributeError, SheetTitleException) as exc: self.logger.error(f"adding summary sheet failed: {g_repr(exc)}") return False return True @@ -303,6 +303,7 @@ def sum_sheet_fill_resp( """ fill response field in summary sheet. """ + print(raw_df) try: sess_vec = np.array(dft_err_df.columns) if scan_mode == ScanMode.SERV: @@ -325,6 +326,10 @@ def sum_sheet_fill_resp( & (raw_df[ColNm.sbfn] == sbfn) ) err_ser = raw_df.loc[cond, ColNm.resp].mode() + if err_ser.size == 0: + cur_row += 1 + self.logger.warning(f"no response recorded for entry 0x{entry:X} sbfn 0x{sbfn:X}") + continue resp = self.get_code_text( err_ser.iloc[-1], self.iso_err_code_dict ) @@ -356,7 +361,7 @@ def add_failure_sheet( add failure(undocumented or missing) sheet to report EXCEL file.
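The IndexError hot fix above exists because pandas Series.mode() on an empty selection returns an empty Series, so the subsequent err_ser.iloc[-1] raises once no response matches the entry/session filter. A two-line reproduction of the failure the guard prevents:

    import pandas as pd

    err_ser = pd.Series([], dtype="int64").mode()  # no matching responses
    assert err_ser.size == 0  # err_ser.iloc[-1] would raise IndexError, hence the guard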
""" if scan_mode == ScanMode.UNKNOWN: - self.logger.error("adding summary sheet failed: scan mode unknown.") + self.logger.error("adding failure summary sheets failed: scan mode unknown.") return False try: dft_err_df = self.get_dft_err_df_from_raw(raw_df) From 55357e62715f8688a76dafa483d24f69607d529d Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Thu, 24 Oct 2024 08:08:08 +0200 Subject: [PATCH 25/26] Add analyzer basic support for SID 0x28 --- src/gallia/analyzer/extractor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/gallia/analyzer/extractor.py b/src/gallia/analyzer/extractor.py index b1974089b..433cac5c5 100644 --- a/src/gallia/analyzer/extractor.py +++ b/src/gallia/analyzer/extractor.py @@ -135,6 +135,8 @@ def extract_iden(self, run: int) -> bool: THEN json_extract("request_data", '$.sub_function') WHEN json_extract("request_data", '$.service_id') = 0x11 THEN json_extract("request_data", '$.sub_function') + WHEN json_extract("request_data", '$.service_id') = 0x28 + THEN json_extract("request_data", '$.control_type') * 0x100 + json_extract("request_data", '$.communication_type') WHEN json_extract("request_data", '$.data_identifier') IS NULL THEN json_extract("request_data", '$.data_identifiers[0]') ELSE json_extract("request_data", '$.data_identifier') From 308a9832feebad9c2f569292e158615084b4452e Mon Sep 17 00:00:00 2001 From: Tobias Specht Date: Thu, 24 Oct 2024 08:08:49 +0200 Subject: [PATCH 26/26] clean up --- src/gallia/analyzer/xl_generator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/gallia/analyzer/xl_generator.py b/src/gallia/analyzer/xl_generator.py index d7879d263..681bd48af 100644 --- a/src/gallia/analyzer/xl_generator.py +++ b/src/gallia/analyzer/xl_generator.py @@ -303,7 +303,6 @@ def sum_sheet_fill_resp( """ fill response field in summary sheet. """ - print(raw_df) try: sess_vec = np.array(dft_err_df.columns) if scan_mode == ScanMode.SERV: