From 61e354df3d1bb2640eeb722102a1486828ee46da Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Thu, 16 Nov 2023 16:18:39 -0500 Subject: [PATCH 01/13] Update log message in GSP node to indicate which task it pertained --- .../angel_system_nodes/task_monitoring/global_step_predictor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py index e24763128..75af0efc7 100644 --- a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py +++ b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py @@ -405,7 +405,7 @@ def publish_task_state_message( task_state[f"current_{step_mode}_step"] ] - log.info(f"Publish task update w/ step: {task_step_str}") + log.info(f"Publish task {message.task_name} update w/ step: {task_step_str}") # Exclude background curr_step = task_state[f"current_{step_mode}_step"] task_step = curr_step - 1 if curr_step != 0 else 0 From d7d8899abe81fb2466e0d7fc2675203d363a5172 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Thu, 16 Nov 2023 16:45:09 -0500 Subject: [PATCH 02/13] Hannah requested enabling of all task configs in multi config --- config/tasks/multi-task-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/tasks/multi-task-config.yaml b/config/tasks/multi-task-config.yaml index 609c1bf63..97ad9e190 100644 --- a/config/tasks/multi-task-config.yaml +++ b/config/tasks/multi-task-config.yaml @@ -18,15 +18,15 @@ tasks: - id: 1 label: "Tea" config_file: "./config/tasks/recipe_tea.yaml" - active: false + active: true - id: 2 label: "Pinwheel" config_file: "./config/tasks/recipe_pinwheel.yaml" - active: false + active: true - id: 3 label: "Oatmeal" config_file: "./config/tasks/recipe_oatmeal.yaml" - active: false + active: true - id: 4 label: "Dessert Quesadilla" config_file: "./config/tasks/recipe_dessertquesadilla.yaml" From 01fe5a81dbf8d7a149c5a5617a1eb2e69b04bb69 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Thu, 16 Nov 2023 16:46:53 -0500 Subject: [PATCH 03/13] Add another time conversion function --- ros/angel_utils/python/angel_utils/conversion.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ros/angel_utils/python/angel_utils/conversion.py b/ros/angel_utils/python/angel_utils/conversion.py index 66dcacfab..a67d7f96d 100644 --- a/ros/angel_utils/python/angel_utils/conversion.py +++ b/ros/angel_utils/python/angel_utils/conversion.py @@ -36,6 +36,16 @@ def time_to_int(time_msg: Time) -> int: return (time_msg.sec * SEC_TO_NANO) + time_msg.nanosec +def time_to_float(time_msg: Time) -> float: + """ + Convert the given time message into a floating point value representing + seconds. + :param time_msg: + :return: Floating point seconds. 
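+
+    Illustrative usage (hypothetical values; relies only on the ``sec`` and
+    ``nanosec`` fields this function reads)::
+
+        t = Time(sec=2, nanosec=500_000_000)
+        seconds = time_to_float(t)  # ~2.5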
+ """ + return time_msg.sec + (time_msg.nanosec * NANO_TO_SEC) + + def nano_to_ros_time(timestamp: int) -> Time: """ Convert an integer representing time in nanoseconds to ROS2 Time message From 133f3d8eeb54c8a407ac4726ad94134e03c80bc4 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Thu, 16 Nov 2023 16:47:14 -0500 Subject: [PATCH 04/13] Add common task config structures and load functions --- angel_system/data/config_structs.py | 111 +++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) diff --git a/angel_system/data/config_structs.py b/angel_system/data/config_structs.py index 72cceeca1..9412c900c 100644 --- a/angel_system/data/config_structs.py +++ b/angel_system/data/config_structs.py @@ -2,8 +2,9 @@ Structures related to configuration files. """ -from dataclasses import dataclass, field +from dataclasses import dataclass from os import PathLike +from pathlib import Path from typing import cast from typing import Dict from typing import Sequence @@ -34,7 +35,7 @@ class ObjectLabelSet: def __post_init__(self): # coerce nested label objects into the ObjectLabel type. - if self.labels and not isinstance(self.labels, ObjectLabel): + if self.labels and not isinstance(self.labels[0], ObjectLabel): raw_labels = cast(Sequence[Dict], self.labels) self.labels = tuple(ObjectLabel(**rl) for rl in raw_labels) @@ -50,3 +51,109 @@ def load_object_label_set(filepath: PathLike) -> ObjectLabelSet: with open(filepath) as infile: data = yaml.safe_load(infile) return ObjectLabelSet(**data) + + +@dataclass +class TaskStep: + """ + A single task step with activity components. + """ + id: int + label: str + full_str: str + activity_ids: Tuple[int] + + +@dataclass +class LinearTask: + """ + A linear task with steps composed of activities. + """ + version: str + title: str + labels: Tuple[TaskStep] + + def __post_init__(self): + # Coerce pathlike input (str) into a Path instance if not already. + if self.labels and not isinstance(self.labels[0], TaskStep): + raw = cast(Sequence[Dict], self.labels) + self.labels = tuple(TaskStep(**r) for r in raw) + + +def load_linear_task_config(filepath: PathLike) -> LinearTask: + """ + Load from YAML file a linear task configuration. + + :param filepath: Filepath to load from. + + :return: Structure containing the loaded configuration. + """ + with open(filepath) as infile: + data = yaml.safe_load(infile) + return LinearTask(**data) + + +@dataclass +class OneTaskConfig: + """ + Specification of where one task configuration is located. + """ + id: int + label: str + config_file: Path + active: bool + + def __post_init__(self): + # Coerce pathlike input (str) into a Path instance if not already. + # Interpret relative paths now to absolute based on current working + # directory. + if not isinstance(self.config_file, Path): + self.config_file = Path(self.config_file).absolute() + + +@dataclass +class MultiTaskConfig: + """ + A collection of linear task configurations. + """ + version: str + title: str + tasks: Tuple[OneTaskConfig] + + def __post_init__(self): + # coerce nested task objects into OneTaskConfig types + if self.tasks and not isinstance(self.tasks[0], OneTaskConfig): + raw = cast(Sequence[Dict], self.tasks) + self.tasks = tuple(OneTaskConfig(**r) for r in raw) + + +def load_multi_task_config(filepath: PathLike): + """ + Relative file paths are currently interpreted relative to the current + working directory and resolved to be absolute. + + :param filepath: Filepath to load from. + + :return: Structure containing the loaded configuration. 
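+
+    Illustrative usage (hypothetical path; ``load_active_task_configs`` is
+    defined further down in this module)::
+
+        cfg = load_multi_task_config("config/tasks/multi-task-config.yaml")
+        active = load_active_task_configs(cfg)  # maps task label -> LinearTask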
+ """ + with open(filepath) as infile: + data = yaml.safe_load(infile) + return MultiTaskConfig(**data) + + +def load_active_task_configs(cfg: MultiTaskConfig) -> Dict[str, LinearTask]: + """ + Load task configurations that are enabled in the multitask configuration. + + :param cfg: Multitask configuration to base loading on. + + :raises FileNotFoundError: Configured task configuration file did not refer + to an open-able file. + + :return: Mapping of task label from the input configuration to the + LinearTask instance loaded. + """ + return { + ct.label: load_linear_task_config(ct.config_file) + for ct in cfg.tasks if ct.active + } From a8957d607fe3406b1116c5a393c91d724823f58d Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Thu, 16 Nov 2023 22:21:19 -0500 Subject: [PATCH 05/13] Add activity label config structs --- angel_system/data/config_structs.py | 53 ++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/angel_system/data/config_structs.py b/angel_system/data/config_structs.py index 9412c900c..56e536244 100644 --- a/angel_system/data/config_structs.py +++ b/angel_system/data/config_structs.py @@ -7,6 +7,7 @@ from pathlib import Path from typing import cast from typing import Dict +from typing import Optional from typing import Sequence from typing import Tuple @@ -53,11 +54,57 @@ def load_object_label_set(filepath: PathLike) -> ObjectLabelSet: return ObjectLabelSet(**data) +@dataclass +class ActivityLabel: + """ + One activity classification ID and paired label information. + """ + + # Identifier integer for this activity label + id: int + # Concise string label for this activity. Should not contain any spaces. + label: str + # Full sentence description of this activity. + full_str: str + # Optional integer representing how many times an activity should be + # repeated to be considered "full" + # TODO: This parameter has ambiguous and violated meaning (not used as + # intended if at all). + repeat: Optional[int] = None + + +@dataclass +class ActivityLabelSet: + version: str + title: str + labels: Tuple[ActivityLabel] + + def __post_init__(self): + # coerce nested label objects into the ObjectLabel type. + if self.labels and not isinstance(self.labels[0], ActivityLabel): + raw_labels = cast(Sequence[Dict], self.labels) + self.labels = tuple(ActivityLabel(**rl) for rl in raw_labels) + + +def load_activity_label_set(filepath: PathLike) -> ActivityLabelSet: + """ + Load from YAML file an activity label set configuration. + + :param filepath: Filepath to load from. + + :return: Structure containing the loaded configuration. + """ + with open(filepath) as infile: + data = yaml.safe_load(infile) + return ActivityLabelSet(**data) + + @dataclass class TaskStep: """ A single task step with activity components. """ + id: int label: str full_str: str @@ -69,6 +116,7 @@ class LinearTask: """ A linear task with steps composed of activities. """ + version: str title: str labels: Tuple[TaskStep] @@ -98,6 +146,7 @@ class OneTaskConfig: """ Specification of where one task configuration is located. """ + id: int label: str config_file: Path @@ -116,6 +165,7 @@ class MultiTaskConfig: """ A collection of linear task configurations. 
""" + version: str title: str tasks: Tuple[OneTaskConfig] @@ -155,5 +205,6 @@ def load_active_task_configs(cfg: MultiTaskConfig) -> Dict[str, LinearTask]: """ return { ct.label: load_linear_task_config(ct.config_file) - for ct in cfg.tasks if ct.active + for ct in cfg.tasks + if ct.active } From 2348ffe9f93421d7fbadb2a389e63cefb965f8d8 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 01:34:01 -0500 Subject: [PATCH 06/13] Added node to translate TaskUpdate messages to broad level --- .../task_monitoring/transform_update_lod.py | 147 ++++++++++++++++++ ros/angel_system_nodes/setup.py | 1 + tmux/demos/2023-10-eval_prep-from-old-bag.yml | 42 +++-- 3 files changed, 173 insertions(+), 17 deletions(-) create mode 100644 ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py diff --git a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py new file mode 100644 index 000000000..3c5a5a875 --- /dev/null +++ b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py @@ -0,0 +1,147 @@ +import numpy as np +import rclpy +from rclpy.node import Node + +from angel_system.data.config_structs import load_multi_task_config +from angel_system.global_step_prediction.global_step_predictor import GlobalStepPredictor + +from angel_msgs.msg import TaskUpdate +from angel_utils import declare_and_get_parameters + + +############################################################################### +# Parameter labels + +PARAM_TOPIC_INPUT = "update_topic_input" +PARAM_TOPIC_OUTPUT = "update_topic_output" + +# File for broad/granular step configuration also given to the GSP node. +PARAM_TASK_CONFIG = "task_config_file" + + +class TaskUpdateLodTransformerNode(Node): + """ + Node to transform an input TaskUpdate into another TaskUpdate at a + different level of detail. + + E.g. w.r.t. GlobalStepTracker verbiage, transform a granular-step update + into a broad-step update. + + FUTURE: This could take in "user skill" estimations and adapt task update + levels dynamically. Feedback generator + """ + + def __init__(self): + super().__init__(self.__class__.__name__) + log = self.get_logger() + + params = declare_and_get_parameters( + self, + [ + (PARAM_TOPIC_INPUT,), + (PARAM_TOPIC_OUTPUT,), + (PARAM_TASK_CONFIG,), + ] + ) + + # TODO: This is clearly super intrinsic to GSP implementation -- decouple? + # At a bare minimum, we need to map activity-full-string to broad + # task-step ID for when we receive a TaskUpdate message. We cannot do + # this naively because some broad steps are composed of activities + # shared with other distinct broad steps. + # We will need to recreate the same thing the GSP class does, which we + # can do the simplest by just instantiating our own GSP that we only + # use for structure formation. 
+ config_multi = load_multi_task_config(params[PARAM_TASK_CONFIG]) + self._gsp = GlobalStepPredictor( + recipe_types=[t.label for t in config_multi.tasks], + recipe_config_dict={t.label: t.config_file for t in config_multi.tasks} + ) + self._task_to_tracker = {t["recipe"]: t for t in self._gsp.trackers} + + # Track previous step ID for different + self._prev_broad_id = {l: -1 for l in self._task_to_tracker} + + self._pub = self.create_publisher( + TaskUpdate, params[PARAM_TOPIC_OUTPUT], 1 + ) + self._sub = self.create_subscription( + TaskUpdate, + params[PARAM_TOPIC_INPUT], + self.cb_task_update, + 1, + ) + log.info("Init complete") + + def cb_task_update(self, msg: TaskUpdate) -> None: + """ + Translate the input TaskUpdate into the target LoD TaskUpdate message. + + :param msg: Message to convert. + """ + log = self.get_logger() + + log.info(f"Received input message:\n{msg}\n") + + tt = self._task_to_tracker[msg.task_name] + + # If we're in the background step (id=0, step="background"), special + # case that doesn't transform. + cur_gran_id = 0 if msg.current_step == "background" else msg.current_step_id + 1 + cur_broad_id = self._gsp.granular_to_broad_step(tt, cur_gran_id) + + # Cannot use GSP provided "prev step" as it is activity class string + # based, which we know is ambiguous is various locations. GSP node is + # also not tracking previous correctly... Using locally tracked + # previous broad ID. + prev_broad_id = self._prev_broad_id[msg.task_name] + + # If the current and previous step now the same, don't send an update. + # Except for when the final "completed steps" slot is now true, which + # means that the final step has completed (final change). + if cur_broad_id == prev_broad_id and not msg.completed_steps[-1]: + # No, change, nothing to translate at the broad level + return + + # Remember to decrement broad_id to "discount" background, but clamp to + # 0 if we are actually *on* background (and leave "background" as the + # str). + msg.current_step_id = max(cur_broad_id - 1, 0) + msg.current_step = tt['broad_step_to_full_str'][cur_broad_id] + msg.previous_step = tt['broad_step_to_full_str'][max(prev_broad_id, 0)] + + completed_steps_arr = np.arange(tt['total_num_broad_steps'] - 1) < (cur_broad_id - 1) + if msg.completed_steps[-1]: + # If the final granular step is done, then so is the final broad step. 
+ completed_steps_arr[-1] = True + msg.completed_steps = completed_steps_arr.tolist() + msg.task_complete_confidence = float(np.all(msg.completed_steps)) + + log.info(f"Converted into:\n{'v'*79}\n{msg}\n{'^'*79}\n") + + self._prev_broad_id[msg.task_name] = cur_broad_id + self._pub.publish(msg) + + +def main(): + rclpy.init() + log = rclpy.logging.get_logger("main") + + node = TaskUpdateLodTransformerNode() + + try: + rclpy.spin(node) + except KeyboardInterrupt: + log.info("Keyboard interrupt, shutting down.\n") + finally: + log.info("Shutting down node and rclpy") + # Destroy the node explicitly + # (optional - otherwise it will be done automatically + # when the garbage collector destroys the node object) + node.destroy_node() + + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/ros/angel_system_nodes/setup.py b/ros/angel_system_nodes/setup.py index f8c4682f7..5731d7b94 100644 --- a/ros/angel_system_nodes/setup.py +++ b/ros/angel_system_nodes/setup.py @@ -60,6 +60,7 @@ "dummy_multi_task_monitor = angel_system_nodes.task_monitoring.dummy_multi_task_monitor:main", "global_step_predictor = angel_system_nodes.task_monitoring.global_step_predictor:main", "keyboard_to_sys_cmd = angel_system_nodes.task_monitoring.keyboard_to_sys_cmd:main", + "transform_update_lod = angel_system_nodes.task_monitoring.transform_update_lod:main", ], }, ) diff --git a/tmux/demos/2023-10-eval_prep-from-old-bag.yml b/tmux/demos/2023-10-eval_prep-from-old-bag.yml index 23dcd264a..d52ceceea 100644 --- a/tmux/demos/2023-10-eval_prep-from-old-bag.yml +++ b/tmux/demos/2023-10-eval_prep-from-old-bag.yml @@ -109,23 +109,31 @@ windows: -r __ns:=${ROS_NAMESPACE} -p system_command_topic:=SystemCommands - - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args - -r __ns:=${ROS_NAMESPACE} - -p det_topic:=ActivityDetections - -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy - -p threshold_multiplier_weak:=0.05 - -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) - -p threshold_frame_count_weak:=2 - -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) - -p step_mode:=granular - -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml - -p task_state_topic:=TaskUpdates - -p query_task_graph_topic:=query_task_graph - -p task_error_topic:=TaskErrors - -p system_command_topic:=SystemCommands - #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json - #-p gt_video_id:=8 - #-p gt_output_dir:="${BAGS_DIR}" + - task_monitor: + layout: even-vertical + panes: + - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args + -r __ns:=${ROS_NAMESPACE} + -p det_topic:=ActivityDetections + -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy + -p threshold_multiplier_weak:=0.05 + -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) + -p threshold_frame_count_weak:=2 + -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) + -p step_mode:=granular + -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml + -p task_state_topic:=TaskUpdates + -p query_task_graph_topic:=query_task_graph + -p task_error_topic:=TaskErrors + -p system_command_topic:=SystemCommands + #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json + #-p gt_video_id:=8 + #-p gt_output_dir:="${BAGS_DIR}" + - transform_lod: ros2 run angel_system_nodes transform_update_lod --ros-args + -r __ns:=${ROS_NAMESPACE} + -p update_topic_input:=TaskUpdates + -p update_topic_output:=TaskUpdatesBroad 
+ -p task_config_file:="${CONFIG_DIR}/tasks/multi-task-config.yaml" - engineering-ui: layout: even-vertical From 99e2c5b93a633e0bfcc83e850e91a4b4f29b87eb Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 02:05:01 -0500 Subject: [PATCH 07/13] Add logging node for Eval2 requested format --- .../angel_system_nodes/eval/__init__.py | 0 .../angel_system_nodes/eval/eval_2_logger.py | 252 ++++++++++++++++++ ros/angel_system_nodes/setup.py | 2 + tmux/demos/2023-10-eval_prep-from-old-bag.yml | 7 + 4 files changed, 261 insertions(+) create mode 100644 ros/angel_system_nodes/angel_system_nodes/eval/__init__.py create mode 100644 ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/__init__.py b/ros/angel_system_nodes/angel_system_nodes/eval/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py new file mode 100644 index 000000000..2f0cc390d --- /dev/null +++ b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py @@ -0,0 +1,252 @@ +""" +Consume system messages and log to a file according to the Eval2 requirements +as documented here in the "Log Format Specification" section: + https://docs.google.com/document/d/1efuWwEvVXWJ-0H1nAV_3kyCDjkW9YY93/edit + +This node is currently only compatible with the `global_step_predictor` task +monitoring node due to leveraging specific implementation/output semantics. +""" +import csv +import math +from pathlib import Path +import time +from threading import RLock +from typing import Dict +from typing import Optional + +import numpy as np +import rclpy +from rclpy.node import Node + +from angel_system.data.config_structs import ( + load_activity_label_set, + load_multi_task_config, + load_active_task_configs, +) +from angel_system.global_step_prediction.global_step_predictor import GlobalStepPredictor + +from angel_msgs.msg import ( + AruiUserNotification, + TaskUpdate, +) +from angel_utils import declare_and_get_parameters +from angel_utils.conversion import time_to_float + + +############################################################################### +# Parameter names/docs + +# Input topic for task updates +PARAM_TOPIC_TASK_UPDATES = "topic_task_updates" +# Input topic for notifications, some of which are error notifications. +PARAM_TOPIC_NOTIFICATIONS = "topic_notifications" +# Directory in which to write our log file. +PARAM_OUTPUT_DIR = "log_output_dir" + + +############################################################################### + +# Expected string name for our team. +TEAM_NAME = "KITWARE" + +# Mapping of our recipe task name to expected logged value. +RECIPE_TO_ID = { + "Pinwheel": "A", + "Coffee": "B", + "Tea": "C", + "Oatmeal": "D", + "Dessert Quesadilla": "E", +} + + +############################################################################### + +def ts_str(t: Optional[float] = None) -> str: + """ + Generate "now" timestamp as a string, used in both filename and log lines. + :return: String "now" timestamp. + """ + if t is None: + t = time.time() + tl = time.localtime(t) + ts_fmt = time.strftime(r"%Y-%m-%dT%H:%M:%S.{decimal}Z", tl) + # `.3f` format will guarantee a decimal point in the string even if `t` is + # an effective integer or in the [0,1] range. 
`modf[0]` will always be less + # than 1, thus can guarantee returned value string will always start with + # "0.", using `[2:]` to get string after the decimal point. + return ts_fmt.format(decimal=f"{math.modf(t)[0]:.3f}"[2:]) + + +class Eval2LoggingNode(Node): + __doc__ = __doc__ # equal to module doc-string. + + def __init__(self): + super().__init__(self.__class__.__name__) + log = self.get_logger() + + params = declare_and_get_parameters( + self, + [ + (PARAM_TOPIC_TASK_UPDATES,), + (PARAM_TOPIC_NOTIFICATIONS,), + (PARAM_OUTPUT_DIR,), + ] + ) + + # Unix timestamp of this "trial" for logging. + self._trial_timestamp = t = time.time() + self._log_output_dir = Path(params[PARAM_OUTPUT_DIR]) + self._log_output_filepath = ( + self._log_output_dir / f"kitware_trial_log_{ts_str(t)}.log" + ) + log.info(f"Writing to log file: {self._log_output_filepath}") + + # Open file to our logging lines to. Open in non-binary mode for + # writing. + self._log_file = open(self._log_output_filepath, 'w') + self._log_csv = csv.writer(self._log_file) + # Lock for synchronizing log file writing. + self._log_lock = RLock() + + self._sub_task_update = self.create_subscription( + TaskUpdate, + params[PARAM_TOPIC_TASK_UPDATES], + self.cb_task_update, + 1, + ) + self._sub_error_notifications = self.create_subscription( + AruiUserNotification, + params[PARAM_TOPIC_NOTIFICATIONS], + self.cb_arui_notification, + 1, + ) + log.info("Init complete") + + def log_line(self, t, task_name, step_number, current_status, comment=None) -> None: + """ + Log an individual line to the file. + + Thread-safe. + + :param t: unix time the logging is associated with. + :param task_name: Recipe task name. This is expected to follow the\ + given spec, otherwise we will not log anything. + :param step_number: + :param current_status: + :param comment: Optional additional comment, like what an error is + about. + """ + log = self.get_logger() + + # Translate inputs into required format values + try: + recipe_id = RECIPE_TO_ID[task_name] if task_name != "null" else task_name + except KeyError: + log.error( + f"No recipe identifier for task name \"{task_name}\". " + f"Skipping logging. Otherwise input: " + f"step_number={step_number}, current_status={current_status}, " + f"comment={comment}" + ) + return + + row = [ + ts_str(t), + TEAM_NAME, + recipe_id, + step_number, + current_status, + ] + if comment is not None: + row.append(comment) + + log.info(f"Logging row: {row}") + + with self._log_lock: + if not self._log_file.closed: + self._log_csv.writerow(row) + self._log_file.flush() + + def cb_task_update(self, msg: TaskUpdate) -> None: + log = self.get_logger() + + t = time_to_float(msg.latest_sensor_input_time) + + # If we are on step "0" and current_step="background", we are in the + # background state, in which state the logging wants "nulls" in places. + if msg.current_step_id == 0 and msg.current_step == "background": + # In background, transmits "null"s appropriately + self.log_line( + t, "null", "null", "null" + ) + else: + if ( + msg.task_name == "Pinwheel" + and msg.current_step_id == 10 + and not msg.completed_steps[-1] + ): + # Known special case for Pinwheel task where we have omitted a + # step in our configuration due to algorithm performance. + self.log_line(t, "null", "null", "null") + self.log_line(t, msg.task_name, msg.current_step_id+2, "active") + elif np.all(msg.completed_steps): + # If all steps are completed, output nulls to indicate the + # final "done" state. 
+ self.log_line(t, "null", "null", "null", + f"{msg.task_name} task completed") + else: + # Emit a null line to indicate the previous task was completed, + # Except if the last step was background. + if msg.previous_step != "background": + self.log_line( + t, "null", "null", "null", + f"Stopped performing: {msg.previous_step}" + ) + self.log_line( + t, + msg.task_name, + # Steps are 0-index based coming out of the TaskMonitor, bring + # it back into 1-indexed for logging spec. + msg.current_step_id + 1, + "active", + f"Started performing: {msg.current_step}" + ) + + def cb_arui_notification(self, msg: AruiUserNotification) -> None: + # "Error" notification message has the broad step ID in it, so we can + # parse that out via regex. + ... + + def destroy_node(self) -> None: + """ + Clean-up resources + """ + log = self.get_logger() + with self._log_lock: + log.info(f"Closing log file: {self._log_file.name}") + self._log_file.close() + super().destroy_node() + + +def main(): + rclpy.init() + log = rclpy.logging.get_logger("main") + + node = Eval2LoggingNode() + + try: + rclpy.spin(node) + except KeyboardInterrupt: + log.info("Keyboard interrupt, shutting down.\n") + finally: + log.info("Shutting down node and rclpy") + # Destroy the node explicitly + # (optional - otherwise it will be done automatically + # when the garbage collector destroys the node object) + node.destroy_node() + + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/ros/angel_system_nodes/setup.py b/ros/angel_system_nodes/setup.py index 5731d7b94..5b6f3f897 100644 --- a/ros/angel_system_nodes/setup.py +++ b/ros/angel_system_nodes/setup.py @@ -36,6 +36,8 @@ "hl2ss_ros_bridge = angel_system_nodes.data_publishers.hl2ss_ros_bridge:main", "image_timestamp_relay = angel_system_nodes.data_publishers.image_timestamp_relay:main", "redis_ros_bridge = angel_system_nodes.data_publishers.redis_ros_bridge:main", + # Evaluation Components + "eval_2_logger = angel_system_nodes.eval.eval_2_logger:main", # Object detection "berkeley_object_detector = angel_system_nodes.object_detection.berkeley_object_detector:main", "object_detector = angel_system_nodes.object_detection.object_detector:main", diff --git a/tmux/demos/2023-10-eval_prep-from-old-bag.yml b/tmux/demos/2023-10-eval_prep-from-old-bag.yml index d52ceceea..0d6c55d6f 100644 --- a/tmux/demos/2023-10-eval_prep-from-old-bag.yml +++ b/tmux/demos/2023-10-eval_prep-from-old-bag.yml @@ -134,6 +134,13 @@ windows: -p update_topic_input:=TaskUpdates -p update_topic_output:=TaskUpdatesBroad -p task_config_file:="${CONFIG_DIR}/tasks/multi-task-config.yaml" + - eval_2_logger: ros2 run angel_system_nodes eval_2_logger --ros-args + -r __ns:=${ROS_NAMESPACE} + -p topic_task_updates:=TaskUpdatesBroad + -p topic_notifications:=TaskErrors + -p log_output_dir:="${BAGS_DIR}" + -p task_config_file:="${CONFIG_DIR}"/tasks/multi-task-config.yaml + -p activity_config_file:="${CONFIG_DIR}"/activity_labels/all_recipe_labels.yaml - engineering-ui: layout: even-vertical From 1ae1fa388d022aa22eeba75c38700c385dc547b7 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 10:11:30 -0500 Subject: [PATCH 08/13] Style fixups --- .../angel_system_nodes/eval/eval_2_logger.py | 31 +++++++++++-------- .../task_monitoring/transform_update_lod.py | 20 ++++++------ 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py index 2f0cc390d..99153af4d 100644 --- 
a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py +++ b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py @@ -23,7 +23,9 @@ load_multi_task_config, load_active_task_configs, ) -from angel_system.global_step_prediction.global_step_predictor import GlobalStepPredictor +from angel_system.global_step_prediction.global_step_predictor import ( + GlobalStepPredictor, +) from angel_msgs.msg import ( AruiUserNotification, @@ -61,6 +63,7 @@ ############################################################################### + def ts_str(t: Optional[float] = None) -> str: """ Generate "now" timestamp as a string, used in both filename and log lines. @@ -90,7 +93,7 @@ def __init__(self): (PARAM_TOPIC_TASK_UPDATES,), (PARAM_TOPIC_NOTIFICATIONS,), (PARAM_OUTPUT_DIR,), - ] + ], ) # Unix timestamp of this "trial" for logging. @@ -103,7 +106,7 @@ def __init__(self): # Open file to our logging lines to. Open in non-binary mode for # writing. - self._log_file = open(self._log_output_filepath, 'w') + self._log_file = open(self._log_output_filepath, "w") self._log_csv = csv.writer(self._log_file) # Lock for synchronizing log file writing. self._log_lock = RLock() @@ -143,7 +146,7 @@ def log_line(self, t, task_name, step_number, current_status, comment=None) -> N recipe_id = RECIPE_TO_ID[task_name] if task_name != "null" else task_name except KeyError: log.error( - f"No recipe identifier for task name \"{task_name}\". " + f'No recipe identifier for task name "{task_name}". ' f"Skipping logging. Otherwise input: " f"step_number={step_number}, current_status={current_status}, " f"comment={comment}" @@ -176,9 +179,7 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # background state, in which state the logging wants "nulls" in places. if msg.current_step_id == 0 and msg.current_step == "background": # In background, transmits "null"s appropriately - self.log_line( - t, "null", "null", "null" - ) + self.log_line(t, "null", "null", "null") else: if ( msg.task_name == "Pinwheel" @@ -188,19 +189,23 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # Known special case for Pinwheel task where we have omitted a # step in our configuration due to algorithm performance. self.log_line(t, "null", "null", "null") - self.log_line(t, msg.task_name, msg.current_step_id+2, "active") + self.log_line(t, msg.task_name, msg.current_step_id + 2, "active") elif np.all(msg.completed_steps): # If all steps are completed, output nulls to indicate the # final "done" state. - self.log_line(t, "null", "null", "null", - f"{msg.task_name} task completed") + self.log_line( + t, "null", "null", "null", f"{msg.task_name} task completed" + ) else: # Emit a null line to indicate the previous task was completed, # Except if the last step was background. if msg.previous_step != "background": self.log_line( - t, "null", "null", "null", - f"Stopped performing: {msg.previous_step}" + t, + "null", + "null", + "null", + f"Stopped performing: {msg.previous_step}", ) self.log_line( t, @@ -209,7 +214,7 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # it back into 1-indexed for logging spec. 
msg.current_step_id + 1, "active", - f"Started performing: {msg.current_step}" + f"Started performing: {msg.current_step}", ) def cb_arui_notification(self, msg: AruiUserNotification) -> None: diff --git a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py index 3c5a5a875..b9c45b9c0 100644 --- a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py +++ b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/transform_update_lod.py @@ -3,7 +3,9 @@ from rclpy.node import Node from angel_system.data.config_structs import load_multi_task_config -from angel_system.global_step_prediction.global_step_predictor import GlobalStepPredictor +from angel_system.global_step_prediction.global_step_predictor import ( + GlobalStepPredictor, +) from angel_msgs.msg import TaskUpdate from angel_utils import declare_and_get_parameters @@ -41,7 +43,7 @@ def __init__(self): (PARAM_TOPIC_INPUT,), (PARAM_TOPIC_OUTPUT,), (PARAM_TASK_CONFIG,), - ] + ], ) # TODO: This is clearly super intrinsic to GSP implementation -- decouple? @@ -55,16 +57,14 @@ def __init__(self): config_multi = load_multi_task_config(params[PARAM_TASK_CONFIG]) self._gsp = GlobalStepPredictor( recipe_types=[t.label for t in config_multi.tasks], - recipe_config_dict={t.label: t.config_file for t in config_multi.tasks} + recipe_config_dict={t.label: t.config_file for t in config_multi.tasks}, ) self._task_to_tracker = {t["recipe"]: t for t in self._gsp.trackers} # Track previous step ID for different self._prev_broad_id = {l: -1 for l in self._task_to_tracker} - self._pub = self.create_publisher( - TaskUpdate, params[PARAM_TOPIC_OUTPUT], 1 - ) + self._pub = self.create_publisher(TaskUpdate, params[PARAM_TOPIC_OUTPUT], 1) self._sub = self.create_subscription( TaskUpdate, params[PARAM_TOPIC_INPUT], @@ -107,10 +107,12 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # 0 if we are actually *on* background (and leave "background" as the # str). msg.current_step_id = max(cur_broad_id - 1, 0) - msg.current_step = tt['broad_step_to_full_str'][cur_broad_id] - msg.previous_step = tt['broad_step_to_full_str'][max(prev_broad_id, 0)] + msg.current_step = tt["broad_step_to_full_str"][cur_broad_id] + msg.previous_step = tt["broad_step_to_full_str"][max(prev_broad_id, 0)] - completed_steps_arr = np.arange(tt['total_num_broad_steps'] - 1) < (cur_broad_id - 1) + completed_steps_arr = np.arange(tt["total_num_broad_steps"] - 1) < ( + cur_broad_id - 1 + ) if msg.completed_steps[-1]: # If the final granular step is done, then so is the final broad step. 
completed_steps_arr[-1] = True From caf3aec1880a1fdf0aff6d84fe4b7d47b2e9e3ca Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 10:29:24 -0500 Subject: [PATCH 09/13] Add Eval2 logging for emitted error notifications --- .../angel_system_nodes/eval/eval_2_logger.py | 24 +++++++++++++++++-- .../task_monitoring/global_step_predictor.py | 3 ++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py index 99153af4d..67af29f0c 100644 --- a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py +++ b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py @@ -9,9 +9,9 @@ import csv import math from pathlib import Path +import re import time from threading import RLock -from typing import Dict from typing import Optional import numpy as np @@ -60,6 +60,11 @@ "Dessert Quesadilla": "E", } +RE_ERR_DESC = re.compile( + r"Recipe: (?P.*), activity: (?P.*), " + r"broad step: \(id=(?P\d+)\) (?P.*)$" +) + ############################################################################### @@ -220,7 +225,22 @@ def cb_task_update(self, msg: TaskUpdate) -> None: def cb_arui_notification(self, msg: AruiUserNotification) -> None: # "Error" notification message has the broad step ID in it, so we can # parse that out via regex. - ... + if msg.context == AruiUserNotification.N_CONTEXT_TASK_ERROR: + m = RE_ERR_DESC.search(msg.description) + if m: + md = m.groupdict() + self.log_line( + time.time(), + md["task_name"], + md["broad_step_id"], + "error", + msg.description, + ) + else: + self.get_logger().error( + f"Failed to parse error notification for logging: " + f"{msg.description}" + ) def destroy_node(self) -> None: """ diff --git a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py index 75af0efc7..c12e95bc9 100644 --- a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py +++ b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py @@ -340,9 +340,10 @@ def det_callback(self, activity_msg: ActivityDetection): ] break + # "activity_str" is the "full_str" of the activity label. 
skipped_step_str = ( f"Recipe: {recipe}, activity: {skipped_step['activity_str']}, " - f"broad step: {broad_step_str}" + f"broad step: (id={broad_step_id}) {broad_step_str}" ) # New skipped step detected, publish error and add it to the list From c3b9b7be8a4f597b3916ef9e382dc5b5ab5ff0c4 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 10:37:40 -0500 Subject: [PATCH 10/13] Spread the config love --- tmux/demos/2023-10-eval_prep-from-bag.yml | 49 +++++++++++++++-------- tmux/demos/2023-10-eval_prep-live.yml | 49 +++++++++++++++-------- 2 files changed, 64 insertions(+), 34 deletions(-) diff --git a/tmux/demos/2023-10-eval_prep-from-bag.yml b/tmux/demos/2023-10-eval_prep-from-bag.yml index 7ec2919dd..2f0836bef 100644 --- a/tmux/demos/2023-10-eval_prep-from-bag.yml +++ b/tmux/demos/2023-10-eval_prep-from-bag.yml @@ -109,23 +109,38 @@ windows: -r __ns:=${ROS_NAMESPACE} -p system_command_topic:=SystemCommands - - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args - -r __ns:=${ROS_NAMESPACE} - -p det_topic:=ActivityDetections - -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy - -p threshold_multiplier_weak:=0.05 - -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) - -p threshold_frame_count_weak:=2 - -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) - -p step_mode:=granular - -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml - -p task_state_topic:=TaskUpdates - -p query_task_graph_topic:=query_task_graph - -p task_error_topic:=TaskErrors - -p system_command_topic:=SystemCommands - #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json - #-p gt_video_id:=8 - #-p gt_output_dir:="${BAGS_DIR}" + - task_monitor: + layout: even-vertical + panes: + - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args + -r __ns:=${ROS_NAMESPACE} + -p det_topic:=ActivityDetections + -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy + -p threshold_multiplier_weak:=0.05 + -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) + -p threshold_frame_count_weak:=2 + -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) + -p step_mode:=granular + -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml + -p task_state_topic:=TaskUpdates + -p query_task_graph_topic:=query_task_graph + -p task_error_topic:=TaskErrors + -p system_command_topic:=SystemCommands + #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json + #-p gt_video_id:=8 + #-p gt_output_dir:="${BAGS_DIR}" + - transform_lod: ros2 run angel_system_nodes transform_update_lod --ros-args + -r __ns:=${ROS_NAMESPACE} + -p update_topic_input:=TaskUpdates + -p update_topic_output:=TaskUpdatesBroad + -p task_config_file:="${CONFIG_DIR}/tasks/multi-task-config.yaml" + - eval_2_logger: ros2 run angel_system_nodes eval_2_logger --ros-args + -r __ns:=${ROS_NAMESPACE} + -p topic_task_updates:=TaskUpdatesBroad + -p topic_notifications:=TaskErrors + -p log_output_dir:="${BAGS_DIR}" + -p task_config_file:="${CONFIG_DIR}"/tasks/multi-task-config.yaml + -p activity_config_file:="${CONFIG_DIR}"/activity_labels/all_recipe_labels.yaml - engineering-ui: layout: even-vertical diff --git a/tmux/demos/2023-10-eval_prep-live.yml b/tmux/demos/2023-10-eval_prep-live.yml index fa65b4e5e..9e2312ab3 100644 --- a/tmux/demos/2023-10-eval_prep-live.yml +++ b/tmux/demos/2023-10-eval_prep-live.yml @@ -118,23 +118,38 @@ windows: -r __ns:=${ROS_NAMESPACE} -p 
system_command_topic:=SystemCommands - - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args - -r __ns:=${ROS_NAMESPACE} - -p det_topic:=ActivityDetections - -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy - -p threshold_multiplier_weak:=0.05 - -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) - -p threshold_frame_count_weak:=2 - -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) - -p step_mode:=granular - -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml - -p task_state_topic:=TaskUpdates - -p query_task_graph_topic:=query_task_graph - -p task_error_topic:=TaskErrors - -p system_command_topic:=SystemCommands - #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json - #-p gt_video_id:=8 - #-p gt_output_dir:="${BAGS_DIR}" + - task_monitor: + layout: even-vertical + panes: + - task_monitor: ros2 run angel_system_nodes global_step_predictor --ros-args + -r __ns:=${ROS_NAMESPACE} + -p det_topic:=ActivityDetections + -p model_file:=${MODEL_DIR}/global_step_predictor_act_avgs_all_classes_v2.0_sample_rate_2.npy + -p threshold_multiplier_weak:=0.05 + -p thresh_frame_count:=$((8 / (30 / ${FRAME_RATE}))) + -p threshold_frame_count_weak:=2 + -p deactivate_thresh_frame_count:=$((20 / (30 / ${FRAME_RATE}))) + -p step_mode:=granular + -p config_file:=${CONFIG_DIR}/tasks/multi-task-config.yaml + -p task_state_topic:=TaskUpdates + -p query_task_graph_topic:=query_task_graph + -p task_error_topic:=TaskErrors + -p system_command_topic:=SystemCommands + #-p gt_activity_mscoco:=model_files/test_activity_preds.mscoco.json + #-p gt_video_id:=8 + #-p gt_output_dir:="${BAGS_DIR}" + - transform_lod: ros2 run angel_system_nodes transform_update_lod --ros-args + -r __ns:=${ROS_NAMESPACE} + -p update_topic_input:=TaskUpdates + -p update_topic_output:=TaskUpdatesBroad + -p task_config_file:="${CONFIG_DIR}/tasks/multi-task-config.yaml" + - eval_2_logger: ros2 run angel_system_nodes eval_2_logger --ros-args + -r __ns:=${ROS_NAMESPACE} + -p topic_task_updates:=TaskUpdatesBroad + -p topic_notifications:=TaskErrors + -p log_output_dir:="${BAGS_DIR}" + -p task_config_file:="${CONFIG_DIR}"/tasks/multi-task-config.yaml + -p activity_config_file:="${CONFIG_DIR}"/activity_labels/all_recipe_labels.yaml - feedback_generator: ros2 run angel_system_nodes feedback_generator --ros-args -r __ns:=${ROS_NAMESPACE} From c6d4ac32e89bfc5e8a292ea3659afd8fd0f65e2a Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 10:37:56 -0500 Subject: [PATCH 11/13] Use common constant vars instead of floating strings --- .../angel_system_nodes/eval/eval_2_logger.py | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py index 67af29f0c..272b0304d 100644 --- a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py +++ b/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py @@ -59,12 +59,19 @@ "Oatmeal": "D", "Dessert Quesadilla": "E", } +RECIPE_NULL = "null" + +NO_STEP_NUMBER = "null" RE_ERR_DESC = re.compile( r"Recipe: (?P.*), activity: (?P.*), " r"broad step: \(id=(?P\d+)\) (?P.*)$" ) +STATUS_ACTIVE = "active" +STATUS_ERROR = "error" +STATUS_NULL = "null" + ############################################################################### @@ -148,7 +155,9 @@ def log_line(self, t, task_name, step_number, current_status, comment=None) -> N # Translate inputs 
into required format values try: - recipe_id = RECIPE_TO_ID[task_name] if task_name != "null" else task_name + recipe_id = ( + RECIPE_TO_ID[task_name] if task_name != RECIPE_NULL else task_name + ) except KeyError: log.error( f'No recipe identifier for task name "{task_name}". ' @@ -184,7 +193,7 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # background state, in which state the logging wants "nulls" in places. if msg.current_step_id == 0 and msg.current_step == "background": # In background, transmits "null"s appropriately - self.log_line(t, "null", "null", "null") + self.log_line(t, RECIPE_NULL, NO_STEP_NUMBER, STATUS_NULL) else: if ( msg.task_name == "Pinwheel" @@ -193,13 +202,17 @@ def cb_task_update(self, msg: TaskUpdate) -> None: ): # Known special case for Pinwheel task where we have omitted a # step in our configuration due to algorithm performance. - self.log_line(t, "null", "null", "null") - self.log_line(t, msg.task_name, msg.current_step_id + 2, "active") + self.log_line(t, RECIPE_NULL, NO_STEP_NUMBER, STATUS_NULL) + self.log_line(t, msg.task_name, msg.current_step_id + 2, STATUS_ACTIVE) elif np.all(msg.completed_steps): # If all steps are completed, output nulls to indicate the # final "done" state. self.log_line( - t, "null", "null", "null", f"{msg.task_name} task completed" + t, + RECIPE_NULL, + NO_STEP_NUMBER, + STATUS_NULL, + f"{msg.task_name} task completed", ) else: # Emit a null line to indicate the previous task was completed, @@ -207,9 +220,9 @@ def cb_task_update(self, msg: TaskUpdate) -> None: if msg.previous_step != "background": self.log_line( t, - "null", - "null", - "null", + RECIPE_NULL, + NO_STEP_NUMBER, + STATUS_NULL, f"Stopped performing: {msg.previous_step}", ) self.log_line( @@ -218,7 +231,7 @@ def cb_task_update(self, msg: TaskUpdate) -> None: # Steps are 0-index based coming out of the TaskMonitor, bring # it back into 1-indexed for logging spec. msg.current_step_id + 1, - "active", + STATUS_ACTIVE, f"Started performing: {msg.current_step}", ) @@ -233,7 +246,7 @@ def cb_arui_notification(self, msg: AruiUserNotification) -> None: time.time(), md["task_name"], md["broad_step_id"], - "error", + STATUS_ERROR, msg.description, ) else: From 30259df480f84a576e3713fa84cc7644e284fa14 Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 11:44:17 -0500 Subject: [PATCH 12/13] Added some TODO's to use config structures instead of raw loads --- .../global_step_prediction/global_step_predictor.py | 10 ++++++++++ .../task_monitoring/global_step_predictor.py | 2 ++ 2 files changed, 12 insertions(+) diff --git a/angel_system/global_step_prediction/global_step_predictor.py b/angel_system/global_step_prediction/global_step_predictor.py index a41105807..124c867cb 100644 --- a/angel_system/global_step_prediction/global_step_predictor.py +++ b/angel_system/global_step_prediction/global_step_predictor.py @@ -28,6 +28,8 @@ def __init__( GlobalStepPredctor: based on a TCN activity classifier's activity classification outputs + a set of recipes, track what step a user is on for multiple recipes. """ + # TODO: make use of angel_system.data.config_structs instead of + # manually loading and accessing by string keys. 
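+        # A rough sketch of that alternative, using the structures added in
+        # this series (assumes ActivityLabelSet semantics as defined there):
+        #   self.activity_config = load_activity_label_set(activity_config_fpath)
+        #   num_activity_classes = len(self.activity_config.labels)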
with open(activity_config_fpath, "r") as stream: self.activity_config = yaml.safe_load(stream) num_activity_classes = len(self.activity_config["labels"]) @@ -68,6 +70,8 @@ def __init__( self.activity_conf_history = np.empty((0, num_activity_classes)) self.recipe_types = recipe_types + # TODO: Expect use of angel_system.data.config_structs instead of + # a raw dictionary. self.recipe_configs = recipe_config_dict # Array of tracker dicts @@ -116,6 +120,8 @@ def get_activity_order_from_config(self, config_fn): Get the order of activity_ids (mapping to granular step number) based on a recipe config """ + # TODO: make use of angel_system.data.config_structs instead of + # manually loading and accessing by string keys. with open(config_fn, "r") as stream: config = yaml.safe_load(stream) broad_steps = config["labels"] @@ -208,6 +214,8 @@ def initialize_new_recipe_tracker(self, recipe, config_fn=None): config_fn = self.recipe_configs[recipe] # Read in task config + # TODO: make use of angel_system.data.config_structs instead of + # manually loading and accessing by string keys. with open(config_fn, "r") as stream: config = yaml.safe_load(stream) labels = [self.sanitize_str(l["full_str"]) for l in config["labels"]] @@ -1025,6 +1033,8 @@ def get_gt_steps_from_gt_activities(self, video_dset, config_fn): def sanitize_str(str_: str): return str_.lower().strip(" .") + # TODO: make use of angel_system.data.config_structs instead of + # manually loading and accessing by string keys. with open(config_fn, "r") as stream: config = yaml.safe_load(stream) labels = [sanitize_str(l["label"]) for l in config["labels"]] diff --git a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py index c12e95bc9..9ebee61ee 100644 --- a/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py +++ b/ros/angel_system_nodes/angel_system_nodes/task_monitoring/global_step_predictor.py @@ -102,6 +102,8 @@ def __init__(self): ) # Determine what recipes are in the config + # TODO: make use of angel_system.data.config_structs instead of + # manually loading and accessing by string keys. 
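+        # A rough sketch of that alternative, using the loaders introduced
+        # earlier in this series (angel_system.data.config_structs):
+        #   config = load_multi_task_config(self._config_file)
+        #   recipe_types = [t.label for t in config.tasks if t.active]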
with open(self._config_file, "r") as stream: config = yaml.safe_load(stream) recipe_types = [ From 7709b631813eea6ac8558a7cf06ffc814fbf770c Mon Sep 17 00:00:00 2001 From: Paul Tunison Date: Fri, 17 Nov 2023 11:52:52 -0500 Subject: [PATCH 13/13] Renamed eval logger to be specific to MIT-LL --- .../eval/{eval_2_logger.py => mitll_eval_2_logger.py} | 0 ros/angel_system_nodes/setup.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename ros/angel_system_nodes/angel_system_nodes/eval/{eval_2_logger.py => mitll_eval_2_logger.py} (100%) diff --git a/ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py b/ros/angel_system_nodes/angel_system_nodes/eval/mitll_eval_2_logger.py similarity index 100% rename from ros/angel_system_nodes/angel_system_nodes/eval/eval_2_logger.py rename to ros/angel_system_nodes/angel_system_nodes/eval/mitll_eval_2_logger.py diff --git a/ros/angel_system_nodes/setup.py b/ros/angel_system_nodes/setup.py index 5b6f3f897..81c576c9a 100644 --- a/ros/angel_system_nodes/setup.py +++ b/ros/angel_system_nodes/setup.py @@ -37,7 +37,7 @@ "image_timestamp_relay = angel_system_nodes.data_publishers.image_timestamp_relay:main", "redis_ros_bridge = angel_system_nodes.data_publishers.redis_ros_bridge:main", # Evaluation Components - "eval_2_logger = angel_system_nodes.eval.eval_2_logger:main", + "eval_2_logger = angel_system_nodes.eval.mitll_eval_2_logger:main", # Object detection "berkeley_object_detector = angel_system_nodes.object_detection.berkeley_object_detector:main", "object_detector = angel_system_nodes.object_detection.object_detector:main",