feat: splitting xgboost train into a feature extractor class

* in progress:
* consolidate xgboost in feature extractor
* consolidate feature extractor into base class
* rerun examples
* add categorical support
* update docstrings
* fix formatting
1 parent 6247a15 · commit 56fa631 · 21 changed files with 1,956 additions and 1,777 deletions.
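For context, here is a minimal sketch of how the new FeatureExtractor class is exercised, inferred from the test file added in this commit. The data variables come from the repository's get_data test helper; the boosting-round value is an arbitrary illustration, and treating the leaf matrix as an embedding for downstream estimators is an assumption rather than something asserted in this diff.

from tests.data import get_data
from xgbse._feature_extractors import FeatureExtractor

# Splits and survival targets provided by the repository's test helper
(X_train, X_test, X_valid, T_train, T_test, T_valid,
 E_train, E_test, E_valid, y_train, y_test, y_valid, features) = get_data()

extractor = FeatureExtractor()  # defaults to the survival:aft objective (see test_no_objective)
extractor.fit(
    X_train,
    y_train,
    num_boost_round=1000,  # illustrative value, not from the diff
    validation_data=(X_valid, y_valid),
    early_stopping_rounds=10,
    verbose_eval=0,
)

leaves = extractor.predict_leaves(X_test)      # one column per tree kept after early stopping
hazard = extractor.predict_hazard(X_test)      # one value per row, per the shape assertions below
importances = extractor.feature_importances_   # scores from the underlying booster (bst.get_score())

The new test module added by this commit follows.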
@@ -0,0 +1,106 @@
import pytest

from tests.data import get_data
from xgbse._feature_extractors import FeatureExtractor

(
    X_train,
    X_test,
    X_valid,
    T_train,
    T_test,
    T_valid,
    E_train,
    E_test,
    E_valid,
    y_train,
    y_test,
    y_valid,
    features,
) = get_data()


def test_wrong_objective():
    with pytest.raises(ValueError):
        FeatureExtractor(xgb_params={"objective": "reg:squarederror"})


def test_no_objective():
    assert FeatureExtractor(xgb_params={}).xgb_params["objective"] == "survival:aft"


def test_predict_leaves_early_stop():
    xgbse = FeatureExtractor()
    xgbse.fit(
        X_train,
        y_train,
        num_boost_round=10000,
        validation_data=(X_valid, y_valid),
        early_stopping_rounds=10,
        verbose_eval=0,
    )
    prediction = xgbse.predict_leaves(X_test)
    assert prediction.shape == (
        X_test.shape[0],
        xgbse.bst.best_iteration + 1,
    )


def test_predict_leaves_no_early_stop():
    xgbse = FeatureExtractor()
    xgbse.fit(
        X_train,
        y_train,
        num_boost_round=100,
        validation_data=(X_valid, y_valid),
        early_stopping_rounds=None,
        verbose_eval=0,
    )
    assert xgbse.predict_leaves(X_test).shape == (X_test.shape[0], 100)


def test_predict_hazard_early_stop():
    xgbse = FeatureExtractor()
    xgbse.fit(
        X_train,
        y_train,
        num_boost_round=10000,
        validation_data=(X_valid, y_valid),
        early_stopping_rounds=10,
        verbose_eval=0,
    )
    assert xgbse.predict_hazard(X_test).shape == (X_test.shape[0],)


def test_predict_hazard_no_early_stop():
    xgbse = FeatureExtractor()
    xgbse.fit(
        X_train,
        y_train,
        num_boost_round=100,
        validation_data=(X_valid, y_valid),
        early_stopping_rounds=None,
        verbose_eval=0,
    )
    assert xgbse.predict_hazard(X_test).shape == (X_test.shape[0],)


def test_feature_importances():
    xgbse = FeatureExtractor()
    xgbse.fit(
        X_train,
        y_train,
        num_boost_round=100,
        validation_data=(X_valid, y_valid),
        early_stopping_rounds=None,
        verbose_eval=0,
    )
    assert xgbse.feature_importances_ == xgbse.bst.get_score()


def test_predict_not_fitted():
    xgbse = FeatureExtractor()
    with pytest.raises(ValueError):
        xgbse.predict_leaves(X_test)
    with pytest.raises(ValueError):
        xgbse.predict_hazard(X_test)