sentiment_analysis_one_hot.py
from keras import backend as K
from keras.models import Sequential
from keras.layers import Lambda, Dense, Activation, Dropout, Flatten, Conv1D
from keras.callbacks import EarlyStopping

# training params
batch_size = 1024
num_epochs = 15
# model parameters
num_filters = 64
embed_dim = 300
weight_decay = 1e-4
filters = 600
kernel_size = 3
MAX_NB_WORDS = 100000


def OneHot(input_dim=None, input_length=None):
    # Check that both arguments were supplied
    if input_dim is None or input_length is None:
        raise TypeError("input_dim or input_length is not set")

    # Helper function (kept separate for clarity): cast the word indices to
    # integers and expand them into one-hot vectors of length num_classes.
    # Casting to 'int32' rather than 'uint8' avoids silently wrapping indices
    # above 255, which matters for realistic vocabulary sizes.
    def _one_hot(x, num_classes):
        return K.one_hot(K.cast(x, 'int32'), num_classes=num_classes)

    # Expose the helper as a Lambda layer so it can be used as the first layer of a model
    return Lambda(_one_hot,
                  arguments={'num_classes': input_dim},
                  input_shape=(input_length,))
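
# Example: with input_dim=5 and input_length=4, a batch of index sequences of
# shape (batch, 4) such as [[1, 0, 3, 4]] is expanded to a float tensor of
# shape (batch, 4, 5), in which index 3 becomes the one-hot row [0., 0., 0., 1., 0.].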


def build_one_hot_model(nb_words, max_seq_len):
    print("building CNN ...")
    model = Sequential()
    # One-hot encode the integer word indices in place of a learned embedding layer
    model.add(OneHot(input_dim=nb_words, input_length=max_seq_len))
    model.add(Dropout(0.4))
    # Stack of 1D convolutions with progressively fewer filters
    model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(Conv1D(300, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(Conv1D(150, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(Conv1D(75, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(Flatten())
    model.add(Dense(600))
    model.add(Dropout(0.5))
    model.add(Activation('relu'))
    # Single sigmoid unit for binary sentiment classification
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
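
# Note: each Conv1D layer above uses 'valid' padding with kernel_size=3, so every
# convolution shortens the sequence by 2 steps; max_seq_len therefore has to be
# larger than 8 for the Flatten layer to receive a non-empty tensor.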


def train_one_hot_model(model, word_seq_train, y_train):
    # define callbacks
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=4, verbose=1)
    callbacks_list = [early_stopping]
    model.fit(
        word_seq_train,
        y_train,
        batch_size=batch_size,
        epochs=num_epochs,
        callbacks=callbacks_list,
        validation_split=0.1,
        shuffle=True,
        verbose=2)
    return model
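

# ---------------------------------------------------------------------------
# Usage sketch (an assumption about how the functions above are meant to be
# wired together, not taken from the original script): `texts`, `labels`, and
# `max_seq_len` are illustrative placeholders; real training data would come
# from a labelled sentiment dataset.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences

    # Placeholder corpus: two tiny reviews with binary sentiment labels.
    texts = ["great movie, loved every minute of it",
             "terrible plot and very bad acting"]
    labels = np.array([1, 0])

    # Map words to integer indices and pad all sequences to the same length.
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)

    max_seq_len = 100  # must be larger than 8 (see the note after build_one_hot_model)
    word_seq = pad_sequences(sequences, maxlen=max_seq_len)

    # Vocabulary size seen by the OneHot layer (index 0 is reserved for padding).
    nb_words = min(MAX_NB_WORDS, len(tokenizer.word_index) + 1)

    model = build_one_hot_model(nb_words, max_seq_len)
    model = train_one_hot_model(model, word_seq, labels)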