FIX use a lock file to ensure thread safety
dantegd committed Feb 6, 2025
1 parent 9d08f5c commit 165dac6
Showing 4 changed files with 19 additions and 18 deletions.
1 change: 1 addition & 0 deletions python/cuml/cuml/ensemble/randomforest_common.pyx
@@ -159,6 +159,7 @@ class BaseRandomForestModel(UniversalBase):
         self.model_pbuf_bytes = bytearray()
         self.treelite_handle = None
         self.treelite_serialized_model = None
+        self._cpu_model_class_lock = threading.RLock()
 
     def _get_max_feat_val(self) -> float:
         if isinstance(self.max_features, int):
14 changes: 9 additions & 5 deletions python/cuml/cuml/ensemble/randomforestclassifier.pyx
@@ -862,13 +862,17 @@ class RandomForestClassifier(BaseRandomForestModel,
         # temporarily here just for treelite internal check and
         # restore the __class__ at the end of the method.
         if GlobalSettings().accelerator_active:
-            cls_cahed = self._cpu_model.__class__
-            self._cpu_model.__class__ = sys.modules['sklearn.ensemble'].RandomForestClassifier
+            with self._cpu_model_class_lock:
+                original_class = self._cpu_model.__class__
+                self._cpu_model.__class__ = sys.modules['sklearn.ensemble'].RandomForestClassifier
 
-        super().cpu_to_gpu()
+                try:
+                    super().cpu_to_gpu()
+                finally:
+                    self._cpu_model.__class__ = original_class
 
-        if GlobalSettings().accelerator_active:
-            self._cpu_model.__class__ = cls_cahed
+        else:
+            super().cpu_to_gpu()
 
     @classmethod
     def _hyperparam_translator(cls, **kwargs):
14 changes: 9 additions & 5 deletions python/cuml/cuml/ensemble/randomforestregressor.pyx
@@ -785,13 +785,17 @@ class RandomForestRegressor(BaseRandomForestModel,
         # temporarily here just for treelite internal check and
         # restore the __class__ at the end of the method.
         if GlobalSettings().accelerator_active:
-            cls_cahed = self._cpu_model.__class__
-            self._cpu_model.__class__ = sys.modules['sklearn.ensemble'].RandomForestRegressor
+            with self._cpu_model_class_lock:
+                original_class = self._cpu_model.__class__
+                self._cpu_model.__class__ = sys.modules['sklearn.ensemble'].RandomForestRegressor
 
-        super().cpu_to_gpu()
+                try:
+                    super().cpu_to_gpu()
+                finally:
+                    self._cpu_model.__class__ = original_class
 
-        if GlobalSettings().accelerator_active:
-            self._cpu_model.__class__ = cls_cahed
+        else:
+            super().cpu_to_gpu()
 
     @classmethod
     def _hyperparam_translator(cls, **kwargs):
@@ -189,11 +189,3 @@ def test_rf_max_samples_reg(regression_data, max_samples):
     )
     reg.fit(X, y)
     _ = r2_score(y, reg.predict(X))
-
-
-def test_rf_random_state_reg(regression_data):
-    X, y = regression_data
-    reg1 = RandomForestRegressor(n_estimators=50, random_state=42).fit(X, y)
-    reg2 = RandomForestRegressor(n_estimators=50, random_state=42).fit(X, y)
-    # Predictions should be identical when using the same random_state.
-    assert np.allclose(reg1.predict(X), reg2.predict(X))
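For reference, the pattern the classifier and regressor diffs above apply can be reduced to the standalone sketch below: temporarily reassigning an object's __class__ while holding a threading.RLock, and restoring it in a finally block so concurrent callers never observe a half-swapped object. The class names (SklearnLikeModel, ProxyModel, Estimator) are illustrative stand-ins, not cuML code; in the real commit the try block runs super().cpu_to_gpu() (the treelite conversion) and the swapped-in class is the sklearn estimator class.

import threading


class SklearnLikeModel:
    # Stand-in for the sklearn estimator class that the treelite check expects.
    pass


class ProxyModel:
    # Stand-in for the accelerator's wrapped CPU model.
    pass


class Estimator:
    def __init__(self):
        self._cpu_model = ProxyModel()
        # Re-entrant lock guarding the temporary __class__ swap.
        self._cpu_model_class_lock = threading.RLock()

    def cpu_to_gpu(self):
        with self._cpu_model_class_lock:
            original_class = self._cpu_model.__class__
            self._cpu_model.__class__ = SklearnLikeModel
            try:
                # Real code would perform the CPU -> GPU conversion here.
                assert isinstance(self._cpu_model, SklearnLikeModel)
            finally:
                # Always restore the class, even if the conversion raises.
                self._cpu_model.__class__ = original_class


est = Estimator()
threads = [threading.Thread(target=est.cpu_to_gpu) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
# The proxy class is restored after every call, regardless of interleaving.
assert type(est._cpu_model) is ProxyModel

An RLock rather than a plain Lock presumably lets the same thread re-acquire the lock if the conversion path is ever re-entered; with a non-reentrant Lock such a nested call would deadlock.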
