Fixed wrong penalty mean measurement and added estimation error to the penalty

- Added a measurement to the penalty output that captures the difference between the real average penalty and the mean the controller believes it is achieving. This difference is usually non-zero in runs with perturbations, where the controller takes the wrong action.
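A minimal Python sketch of what this measurement captures; the helper name and inputs below are illustrative and not part of the repository:

def penalty_estimation_error(real_penalty_samples, controller_mean_estimate):
    # Real time-averaged penalty observed during the run
    real_mean = sum(real_penalty_samples) / len(real_penalty_samples)
    # Non-zero whenever perturbations make the controller act on a wrong model
    return real_mean - controller_mean_estimate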
Etienne committed Jun 27, 2024
1 parent c182693 commit fbb035f
Showing 2 changed files with 26 additions and 11 deletions.
6 changes: 6 additions & 0 deletions ros_queue_experiments/scripts/common_experiment_utils.py
@@ -240,6 +240,12 @@ def __init__(self, metric_name: str = ""):
        self.target_error_dispersion = Series()
        self.target_error_dispersion.variable_name = "err_target_dispersion"

        self.mean_value = Series()
        self.mean_value.variable_name = "mean_value"

        self.mean_value_dispersion = Series()
        self.mean_value_dispersion.variable_name = "mean_value_dispersion"

class ControllerEndMetricStruct:
    def __init__(self):
        self.metric = {"localization": EndMetricStruct("localization"),
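As a usage sketch, the two Series added above could be filled like this; the numeric values and the interpretation of mean_value_dispersion as a spread across setups are assumed for illustration:

end_metrics = EndMetricStruct("penalty")
end_metrics.mean_value.values = [12.3]            # real average penalty observed for one run (J)
end_metrics.mean_value_dispersion.values = [0.4]  # spread of that mean across repeated setups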
31 changes: 20 additions & 11 deletions ros_queue_experiments/scripts/experiment3_definition.py
@@ -56,11 +56,16 @@ def generateOutput(self, time_init, bag_name, generate_plots = True, base_init_t
controller_end_struct = self.multi_controller_end_struct.controller_end_metrics[controller_type]

for metric_name in common_experiment_utils.metric_type_list:
    controller_end_struct.metric[metric_name].target_error.values = [controller_performance_metric_dict[metric_name].real_continuous_average_diff_with_target.values[-1]]
    if controller_type == "Rew_NoInv" or controller_type == "Rew_Inv":
        controller_end_struct.metric[metric_name].estimation_error.values = [controller_performance_metric_dict[metric_name].absolute_real_continuous_average_diff_with_server_time_average.values[-1]]

    if metric_name == "penalty":
        controller_end_struct.metric[metric_name].mean_value.values = [controller_performance_metric_dict[metric_name].real_average_value.values[-1]]
        controller_end_struct.metric[metric_name].estimation_error.values = [controller_performance_metric_dict[metric_name].real_continuous_average_diff_with_server_mean.values[-1]]
    else:
        controller_end_struct.metric[metric_name].estimation_error.values = [controller_performance_metric_dict[metric_name].absolute_real_continuous_average_diff_with_server_mean.values[-1]]
    controller_end_struct.metric[metric_name].target_error.values = [controller_performance_metric_dict[metric_name].real_continuous_average_diff_with_target.values[-1]]
    if controller_type == "Rew_NoInv" or controller_type == "Rew_Inv":
        controller_end_struct.metric[metric_name].estimation_error.values = [controller_performance_metric_dict[metric_name].absolute_real_continuous_average_diff_with_server_time_average.values[-1]]
    else:
        controller_end_struct.metric[metric_name].estimation_error.values = [controller_performance_metric_dict[metric_name].absolute_real_continuous_average_diff_with_server_mean.values[-1]]

# Create output CSV
separator_second_graph = common_experiment_utils.Series()
@@ -207,7 +212,7 @@ def generateMultiSetupOutputs(self, analyser_dict: Dict[str, SubExperiment3Analy
# ======= Create output CSV =======

# Create variable names
nb_lines = 7 * 4 # 6 fields per controller and there are 4 controllers
nb_lines = 8 * 4 # 8 fields per controller and there are 4 controllers
controller_name_spacers = common_experiment_utils.Series()
controller_name_spacers.variable_name = "Controller type"
metric_name_spacers = common_experiment_utils.Series()
@@ -224,16 +229,16 @@ def generateMultiSetupOutputs(self, analyser_dict: Dict[str, SubExperiment3Analy
for index in range(nb_lines):
    if index == 0:
        controller_name_spacers.values.append("NoRew_NoInv")
    elif index == 7:
    elif index == 8:
        controller_name_spacers.values.append("NoRew_Inv")
    elif index == 14:
    elif index == 16:
        controller_name_spacers.values.append("Rew_NoInv")
    elif index == 21:
    elif index == 24:
        controller_name_spacers.values.append("Rew_Inv")
    else:
        controller_name_spacers.values.append("")

    metric_index = index % 7
    metric_index = index % 8
    if metric_index == 0:
        metric_name_spacers.values.append("Localization")
        error_type_spaces.values.append("Estimation error (m)")
@@ -254,7 +259,10 @@ def generateMultiSetupOutputs(self, analyser_dict: Dict[str, SubExperiment3Analy
error_type_spaces.values.append("Departure-arrival diff (Task/s)")
elif metric_index == 6:
metric_name_spacers.values.append("Penalty")
error_type_spaces.values.append("Penalty (J)")
error_type_spaces.values.append("Penalty estimation error (J)")
elif metric_index == 7:
metric_name_spacers.values.append("")
error_type_spaces.values.append("Penalty mean (J)")
else:
metric_name_spacers.values.append("")

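A short sketch of the row layout implied by this change, assuming the new 8-rows-per-controller arrangement (loop body reduced to the indexing; variable names reused from the diff):

nb_lines = 8 * 4  # 8 label rows per controller, 4 controllers
for index in range(nb_lines):
    controller_block = index // 8  # 0..3: which controller the row belongs to
    metric_index = index % 8       # 0..7: which metric/error label within the block
    # Controller names therefore land on rows 0, 8, 16 and 24, matching the elif
    # branches above; metric_index 6 and 7 carry the two penalty rows.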
@@ -273,7 +281,8 @@ def generateMultiSetupOutputs(self, analyser_dict: Dict[str, SubExperiment3Analy
setup_series[setup_name].values.append(controller_end_metrics.metric["temperature"].target_error.values[0])
setup_series[setup_name].values.append(controller_end_metrics.metric["low_temperature"].target_error.values[0])
setup_series[setup_name].values.append(controller_end_metrics.metric["real_queue"].target_error.values[0])
setup_series[setup_name].values.append(controller_end_metrics.metric["penalty"].target_error.values[0])
setup_series[setup_name].values.append(controller_end_metrics.metric["penalty"].estimation_error.values[0])
setup_series[setup_name].values.append(controller_end_metrics.metric["penalty"].mean_value.values[0])
series_to_record.append(setup_series[setup_name])


