grid_search_rf.py
import pandas as pd
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS, TfidfVectorizer
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
# Read the data #
train_data = pd.read_csv('dataSets/train_set.csv', encoding='utf-8', sep="\t")
# Drop columns that are not used as features #
train_data = train_data.drop(['RowNum', 'Id', 'Title'], axis=1)
y_train = train_data["Category"]
X_train = train_data["Content"]
# Encode the labels as integers #
le = preprocessing.LabelEncoder()
y_train_le = le.fit_transform(y_train)
# inverse_transform recovers the original category names #
y_train_cat = le.inverse_transform(y_train_le)
# Create matrix of TF-IDF features #
tfidf_vectorizer = TfidfVectorizer(stop_words=ENGLISH_STOP_WORDS)
X_train_tfidf = tfidf_vectorizer.fit_transform(X_train)
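# Optional check (not in the original script): the TF-IDF matrix is #
# documents x vocabulary terms, which motivates the LSA step below #
print("TF-IDF matrix shape:", X_train_tfidf.shape)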
# Use LSA for dimensionality reduction #
svd = TruncatedSVD(n_components=100, random_state=123)
# Perform dimensionality reduction #
X_train_reduced = svd.fit_transform(X_train_tfidf)
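# Optional diagnostic (not in the original script): how much of the #
# TF-IDF variance the 100 LSA components retain #
print("LSA explained variance: {:.2%}".format(svd.explained_variance_ratio_.sum()))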
# 10-fold stratified cross-validation #
# shuffle=True is required for random_state to take effect #
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)
# Classifier #
clf = RandomForestClassifier()
# Random forest #
# Note: hyperparameters are selected for accuracy, #
# but the time needed to train the model is also #
# taken into account #
# Best hyperparameters found: #
# n_estimators=100 #
# criterion=entropy #
# max_features=auto (an alias of sqrt in older #
#   scikit-learn releases) #
# bootstrap=False #
# warm_start=True #
# Hyperparameter grid to search #
parameters = {
    "n_estimators": [10, 30, 100],
    "criterion": ["gini", "entropy"],
    # "auto" was an alias of "sqrt" for classifiers and has been #
    # removed in recent scikit-learn releases, so "sqrt" is used #
    "max_features": ["sqrt"],
    "bootstrap": [False],
    "warm_start": [True],
    "random_state": [123]
}
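# Optional check (not in the original script): count the candidate #
# settings; with 10-fold CV each candidate is fitted 10 times #
from sklearn.model_selection import ParameterGrid
print("Grid candidates:", len(ParameterGrid(parameters)))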
# Notes #
# n_estimators: number of trees in the forest #
# criterion: function measuring the quality of a split #
#   gini: the probability of a random sample being #
#   misclassified if it were labeled randomly according #
#   to the label distribution in the branch #
#   entropy: information gain; may be slower to compute #
# max_features: number of features to consider when #
#   looking for the best split #
# min_samples_split: the minimum number of samples #
#   required to split an internal node (kept at its #
#   default here) #
# bootstrap: whether bootstrap samples are used when #
#   building trees #
# warm_start: reuse the trees of the previous fit and #
#   add more estimators to the ensemble #
# Use grid search with 10-fold cross-validation #
gs_clf = GridSearchCV(clf, parameters, cv=kf)
gs_clf = gs_clf.fit(X_train_reduced, y_train_le)
# Print results #
print("Random forest best parameters: ")
print(gs_clf.best_params_)
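# Also report the mean cross-validated score of the best candidate #
# (an addition; not printed by the original script) #
print("Random forest best CV score: ")
print(gs_clf.best_score_)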