Merge pull request #222 from smoors/osu_mixin
use mixin class for osu
casparvl authored Jan 21, 2025
2 parents 9244193 + c33114e commit 6785a0d
Showing 3 changed files with 111 additions and 170 deletions.
9 changes: 6 additions & 3 deletions eessi/testsuite/eessi_mixin.py
@@ -1,4 +1,4 @@
-from reframe.core.builtins import parameter, run_after, variable
+from reframe.core.builtins import parameter, run_after, run_before, variable
from reframe.core.exceptions import ReframeFatalError
from reframe.core.pipeline import RegressionMixin
from reframe.utility.sanity import make_performance_function
@@ -44,6 +44,8 @@ class EESSI_Mixin(RegressionMixin):
scale = parameter(SCALES.keys())
bench_name = None
bench_name_ci = None
+num_tasks_per_compute_unit = 1
+always_request_gpus = None

# Create ReFrame variables for logging runtime environment information
cvmfs_repo_name = variable(str, value='None')
@@ -127,7 +129,7 @@ def run_after_init(self):
# Set scales as tags
hooks.set_tag_scale(self)

-@run_after('init')
+@run_before('setup', always_last=True)
def measure_mem_usage(self):
if self.measure_memory_usage:
hooks.measure_memory_usage(self)
@@ -172,7 +174,8 @@ def validate_setup(self):
@run_after('setup')
def assign_tasks_per_compute_unit(self):
"""Call hooks to assign tasks per compute unit, set OMP_NUM_THREADS, and set compact process binding"""
-hooks.assign_tasks_per_compute_unit(test=self, compute_unit=self.compute_unit)
+hooks.assign_tasks_per_compute_unit(test=self, compute_unit=self.compute_unit,
+                                    num_per=self.num_tasks_per_compute_unit)

# Set OMP_NUM_THREADS environment variable
hooks.set_omp_num_threads(self)
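The two new class attributes give tests that consume the mixin a knob for the task count per compute unit and for forcing GPU requests. Below is a minimal sketch of how an OSU-style test could use them; the class name, executable, and the eessi.testsuite.constants import path are assumptions for illustration, not the actual OSU test added by this PR.

import reframe as rfm

from eessi.testsuite.constants import COMPUTE_UNIT, NODE
from eessi.testsuite.eessi_mixin import EESSI_Mixin


@rfm.simple_test
class example_osu_pt2pt(rfm.RunOnlyRegressionTest, EESSI_Mixin):
    """Hypothetical point-to-point benchmark sketch, not the test from this PR."""
    valid_systems = ['*']
    valid_prog_environs = ['default']
    executable = 'osu_latency'
    bench_name = 'pt2pt'
    bench_name_ci = 'pt2pt'

    # Point-to-point runs need exactly 2 ranks, so let the mixin launch
    # 2 tasks per compute unit (num_per != 1 is only supported for NODE).
    compute_unit = COMPUTE_UNIT[NODE]
    num_tasks_per_compute_unit = 2

    # Request GPUs on GPU partitions even when the partition does not carry
    # the ALWAYS_REQUEST_GPUS feature.
    always_request_gpus = True

The mixin validates further attributes during setup (see validate_setup above); those are omitted here for brevity.
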
7 changes: 6 additions & 1 deletion eessi/testsuite/hooks.py
@@ -57,6 +57,8 @@ def _assign_default_num_gpus_per_node(test: rfm.RegressionTest):
# no default set yet, so setting one
test.default_num_gpus_per_node = math.ceil(test.max_avail_gpus_per_node / test.node_part)

+log(f'default_num_gpus_per_node set to {test.default_num_gpus_per_node}')


def assign_tasks_per_compute_unit(test: rfm.RegressionTest, compute_unit: str, num_per: int = 1):
"""
@@ -83,6 +85,8 @@ def assign_tasks_per_compute_unit(test: rfm.RegressionTest, compute_unit: str, n
- assign_tasks_per_compute_unit(test, COMPUTE_UNIT[CPU_SOCKET]) will launch 2 tasks with 64 threads per task
"""
+log(f'assign_tasks_per_compute_unit called with compute_unit: {compute_unit} and num_per: {num_per}')

if num_per != 1 and compute_unit not in [COMPUTE_UNIT[NODE]]:
raise NotImplementedError(
f'Non-default num_per {num_per} is not implemented for compute_unit {compute_unit}.')
@@ -713,7 +717,8 @@ def _check_always_request_gpus(test: rfm.RegressionTest):
"""
Make sure we always request enough GPUs if required for the current GPU partition (cluster-specific policy)
"""
-if FEATURES[ALWAYS_REQUEST_GPUS] in test.current_partition.features and not test.num_gpus_per_node:
+always_request_gpus = FEATURES[ALWAYS_REQUEST_GPUS] in test.current_partition.features or test.always_request_gpus
+if always_request_gpus and not test.num_gpus_per_node:
test.num_gpus_per_node = test.default_num_gpus_per_node
log(f'num_gpus_per_node set to {test.num_gpus_per_node} for partition {test.current_partition.name}')

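For tests that call the hooks directly instead of inheriting from EESSI_Mixin, the num_per argument of assign_tasks_per_compute_unit can be passed explicitly; the per-test always_request_gpus attribute read by _check_always_request_gpus is what the mixin's new class variable feeds into. A hedged sketch under assumed names (hypothetical test class and executable; in practice the hook relies on scale- and partition-related attributes that the other EESSI hooks set earlier in the pipeline):

import reframe as rfm
from reframe.core.builtins import run_after

from eessi.testsuite import hooks
from eessi.testsuite.constants import COMPUTE_UNIT, NODE


@rfm.simple_test
class example_direct_hook_use(rfm.RunOnlyRegressionTest):
    """Hypothetical test calling the hook directly rather than via EESSI_Mixin."""
    valid_systems = ['*']
    valid_prog_environs = ['default']
    executable = 'osu_bw'

    @run_after('setup')
    def set_tasks(self):
        # Launch 2 tasks on one node; per the guard in assign_tasks_per_compute_unit,
        # num_per != 1 raises NotImplementedError for any compute unit other than NODE.
        hooks.assign_tasks_per_compute_unit(
            test=self, compute_unit=COMPUTE_UNIT[NODE], num_per=2
        )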