diff --git a/skl2onnx/shape_calculators/feature_selection.py b/skl2onnx/shape_calculators/feature_selection.py
index 3f935ea3c..8c1164817 100644
--- a/skl2onnx/shape_calculators/feature_selection.py
+++ b/skl2onnx/shape_calculators/feature_selection.py
@@ -9,7 +9,7 @@ def calculate_sklearn_select(operator):
     check_input_and_output_numbers(operator, output_count_range=1)
     i = operator.inputs[0]
     N = i.get_first_dimension()
-    C = operator.raw_operator._get_support_mask().sum()
+    C = operator.raw_operator.get_support().sum()
     operator.outputs[0].type = i.type.__class__([N, C])
 
 
diff --git a/tests/test_sklearn_feature_selection_converters.py b/tests/test_sklearn_feature_selection_converters.py
index 3a6c48fbe..7ede71464 100644
--- a/tests/test_sklearn_feature_selection_converters.py
+++ b/tests/test_sklearn_feature_selection_converters.py
@@ -51,6 +51,26 @@ def test_generic_univariate_select_int(self):
             X, model, model_onnx, basename="SklearnGenericUnivariateSelect"
         )
 
+    def test_generic_univariate_select_kbest_int(self):
+        model = GenericUnivariateSelect(mode="k_best", param=2)
+
+        X = np.array(
+            [[1, 2, 3, 1], [0, 3, 1, 4], [3, 5, 6, 1], [1, 2, 1, 5]], dtype=np.int64
+        )
+        y = np.array([0, 1, 0, 1])
+        model.fit(X, y)
+
+        model_onnx = convert_sklearn(
+            model,
+            "generic univariate select",
+            [("input", Int64TensorType([None, X.shape[1]]))],
+            target_opset=TARGET_OPSET,
+        )
+        self.assertTrue(model_onnx is not None)
+        dump_data_and_model(
+            X, model, model_onnx, basename="SklearnGenericUnivariateSelect"
+        )
+
     def test_rfe_int(self):
         model = RFE(estimator=SVR(kernel="linear"))
         X = np.array(
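
Note (illustrative, not part of the patch): get_support() is the public SelectorMixin method that returns a boolean mask over the input features, so summing it gives the number of columns the selector keeps, which is the C the shape calculator uses for the output shape [N, C]. A minimal sketch against scikit-learn directly, using the same toy data as the new unit test:

import numpy as np
from sklearn.feature_selection import GenericUnivariateSelect

# Same toy data as the new test case above.
X = np.array([[1, 2, 3, 1], [0, 3, 1, 4], [3, 5, 6, 1], [1, 2, 1, 5]], dtype=np.int64)
y = np.array([0, 1, 0, 1])

selector = GenericUnivariateSelect(mode="k_best", param=2).fit(X, y)
mask = selector.get_support()  # boolean mask over the 4 input columns
# The number of selected features equals the width of the transformed output.
assert mask.sum() == selector.transform(X).shape[1] == 2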