From 3af11c48fe86dff4b3810767fd7fe07ea3502d93 Mon Sep 17 00:00:00 2001 From: Floris-Jan Willemsen Date: Thu, 25 Apr 2024 17:38:53 +0200 Subject: [PATCH] Implemented number of iterations per strategy, allowing automatic supplementation of missing runtimes --- .../methodology_paper_evaluation.json | 1 + src/autotuning_methodology/runner.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/experiment_files/methodology_paper_evaluation.json b/experiment_files/methodology_paper_evaluation.json index 4cb8e38..607c0da 100644 --- a/experiment_files/methodology_paper_evaluation.json +++ b/experiment_files/methodology_paper_evaluation.json @@ -41,6 +41,7 @@ "compare_split_times": false }, "strategy_defaults": { + "iterations": 32, "repeats": 100, "minimum_number_of_evaluations": 20, "stochastic": true, diff --git a/src/autotuning_methodology/runner.py b/src/autotuning_methodology/runner.py index 91af1f7..ab37d62 100755 --- a/src/autotuning_methodology/runner.py +++ b/src/autotuning_methodology/runner.py @@ -131,6 +131,15 @@ def tune_with_kerneltuner(): metadata, results = get_results_and_metadata( filename_results=kernel.file_path_results, filename_metadata=kernel.file_path_metadata ) + # check that the number of iterations is correct + if "iterations" in strategy: + for result in results: + if "runtimes" in result: + num_iters = len(result["runtimes"]) + assert ( + strategy["iterations"] == num_iters + ), f"Specified {strategy['iterations']=} not equal to actual number of iterations ({num_iters})" + break if "max_fevals" in strategy["options"]: max_fevals = strategy["options"]["max_fevals"] if len(results) < max_fevals * 0.1: @@ -250,6 +259,16 @@ def import_from_KTT(use_param_mapping=True, use_bruteforce_objective=True): duration = searchspace_stats.get_value_in_config(config_string_key, "time") else: duration = np.mean(times_runtimes) + assert ( + "iterations" in strategy + ), "For imported KTT runs, the number of iterations must be specified in the strategy in the experiments file" + if strategy["iterations"] != len(times_runtimes): + warnings.warn( + f"The specified number of iterations ({strategy['iterations']}) did not equal " + + f"the actual number of iterations ({len(times_runtimes)}). " + + "The average has been used." + ) + times_runtimes = [np.mean(times_runtimes)] * strategy["iterations"] times_search_algorithm = timemapper(config_attempt.get("SearcherOverhead", 0)) times_validation = timemapper(config_attempt.get("ValidationOverhead", 0)) times_framework = timemapper(config_attempt.get("DataMovementOverhead", 0))