Benchmarks: make names more clear (#1684)
Summary:
Pull Request resolved: #1684

'full_run' -> 'multiple_problems_methods' (problems x methods x seeds)
'test' -> 'one_method_problem' (one problem and method, multiple seeds)
'replication' -> 'replication' (one problem-method-seed combination)

Also ran `arc lint`.

Reviewed By: Balandat

Differential Revision: D46989156

fbshipit-source-id: 767b03d35dbd59c50f7fe006d94f2a2d7e3218f3
esantorella authored and facebook-github-bot committed Jun 24, 2023
1 parent f2aa68e commit 94363dd
Showing 2 changed files with 18 additions and 10 deletions.
12 changes: 9 additions & 3 deletions ax/benchmark/benchmark.py
@@ -127,7 +127,7 @@ def benchmark_replication(
     )


-def benchmark_test(
+def benchmark_one_method_problem(
     problem: BenchmarkProblem,
     method: BenchmarkMethod,
     seeds: Iterable[int],
@@ -140,12 +140,18 @@ def benchmark_test(
     )


-def benchmark_full_run(
+def benchmark_multiple_problems_methods(
     problems: Iterable[BenchmarkProblem],
     methods: Iterable[BenchmarkMethod],
     seeds: Iterable[int],
 ) -> List[AggregatedBenchmarkResult]:
+    """
+    For each `problem` and `method` in the Cartesian product of `problems` and
+    `methods`, run the replication on each seed in `seeds` and get the results
+    as an `AggregatedBenchmarkResult`, then return a list of each
+    `AggregatedBenchmarkResult`.
+    """
     return [
-        benchmark_test(problem=p, method=m, seeds=seeds)
+        benchmark_one_method_problem(problem=p, method=m, seeds=seeds)
         for p, m in product(problems, methods)
     ]
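To illustrate how the three levels relate after the rename, here is a minimal, self-contained sketch; it is not the Ax implementation, and the toy run_* helpers and string "results" are placeholders: a replication covers one problem-method-seed, one_method_problem runs one problem-method pair over several seeds, and multiple_problems_methods covers the Cartesian product of problems and methods.

from itertools import product
from typing import Iterable, List


def run_replication(problem: str, method: str, seed: int) -> str:
    # One problem, one method, one seed (mirrors benchmark_replication).
    return f"{problem}/{method}/seed={seed}"


def run_one_method_problem(problem: str, method: str, seeds: Iterable[int]) -> List[str]:
    # One problem-method pair over multiple seeds (mirrors benchmark_one_method_problem).
    return [run_replication(problem, method, seed) for seed in seeds]


def run_multiple_problems_methods(
    problems: Iterable[str], methods: Iterable[str], seeds: Iterable[int]
) -> List[List[str]]:
    # Cartesian product of problems and methods, each run over all seeds
    # (mirrors benchmark_multiple_problems_methods).
    return [run_one_method_problem(p, m, seeds) for p, m in product(problems, methods)]


print(run_multiple_problems_methods(["branin"], ["sobol", "gpei"], (0, 1)))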
16 changes: 9 additions & 7 deletions ax/benchmark/tests/test_benchmark.py
@@ -5,9 +5,9 @@

 import numpy as np
 from ax.benchmark.benchmark import (
-    benchmark_full_run,
+    benchmark_multiple_problems_methods,
+    benchmark_one_method_problem,
     benchmark_replication,
-    benchmark_test,
 )
 from ax.benchmark.benchmark_method import BenchmarkMethod
 from ax.benchmark.benchmark_problem import SingleObjectiveBenchmarkProblem
@@ -58,9 +58,9 @@ def test_replication_moo(self) -> None:

         self.assertTrue(np.all(res.score_trace <= 100))

-    def test_test(self) -> None:
+    def test_benchmark_one_method_problem(self) -> None:
         problem = get_single_objective_benchmark_problem()
-        agg = benchmark_test(
+        agg = benchmark_one_method_problem(
             problem=problem,
             method=get_sobol_benchmark_method(),
             seeds=(0, 1),
@@ -79,8 +79,8 @@ def test_test(self) -> None:
             self.assertTrue((agg.score_trace[col] <= 100).all())

     @fast_botorch_optimize
-    def test_full_run(self) -> None:
-        aggs = benchmark_full_run(
+    def test_benchmark_multiple_problems_methods(self) -> None:
+        aggs = benchmark_multiple_problems_methods(
             problems=[get_single_objective_benchmark_problem()],
             methods=[get_sobol_benchmark_method(), get_sobol_gpei_benchmark_method()],
             seeds=(0, 1),
@@ -123,7 +123,9 @@ def test_timeout(self) -> None:
         )

         # Each replication will have a different number of trials
-        result = benchmark_test(problem=problem, method=method, seeds=(0, 1, 2, 3))
+        result = benchmark_one_method_problem(
+            problem=problem, method=method, seeds=(0, 1, 2, 3)
+        )

         # Test the traces get composited correctly. The AggregatedResult's traces
         # should be the length of the shortest trace in the BenchmarkResults
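For intuition about the trace compositing that the timeout test checks, here is a small illustrative sketch of the assumed behavior (not the Ax source): traces from replications of different lengths are cut to the length of the shortest trace before being aggregated.

import numpy as np

# Replications stopped at different points, so traces differ in length.
traces = [np.array([10.0, 8.0, 7.0, 6.5]), np.array([9.0, 7.5, 7.0])]
min_len = min(len(t) for t in traces)               # length of the shortest trace
stacked = np.vstack([t[:min_len] for t in traces])  # truncate, then stack
mean_trace = stacked.mean(axis=0)                   # aggregated trace has length min_len
print(mean_trace)  # [9.5  7.75 7.  ]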
