This repository has been archived by the owner on Feb 28, 2018. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 39
/
Copy pathnn.js
176 lines (140 loc) · 5.38 KB
/
nn.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
// Daniel Shiffman
// Nature of Code: Intelligence and Learning
// https://github.com/shiffman/NOC-S17-2-Intelligence-Learning
// Based on "Make Your Own Neural Network" by Tariq Rashid
// https://github.com/makeyourownneuralnetwork/
// This version of nn.js adds some functionality for evolution
// copy() and mutate()
// Sigmoid activation function: maps any real x into (0, 1).
// https://en.wikipedia.org/wiki/Sigmoid_function
// @param {number} x - raw weighted input to a node
// @returns {number} 1 / (1 + e^-x)
NeuralNetwork.sigmoid = function(x) {
  // Math.exp replaces the original `pow(Math.E, -x)`, which relied on
  // p5.js's global pow() and threw a ReferenceError outside a p5 sketch.
  return 1 / (1 + Math.exp(-x));
}
// Derivative of the sigmoid, written in terms of the sigmoid's OUTPUT:
// for y = sigmoid(x), dy/dx = y * (1 - y). train() passes already-activated
// values here, which is exactly what this form expects.
// @param {number} y - a sigmoid output value
// @returns {number} slope of the sigmoid at that output
NeuralNetwork.dSigmoid = function(y) {
  var slope = y * (1 - y);
  return slope;
}
// Hyperbolic tangent activation: squashes any real x into (-1, 1).
// @param {number} x - raw weighted input to a node
// @returns {number} tanh(x)
NeuralNetwork.tanh = function(x) {
  return Math.tanh(x);
}
// Derivative of tanh with respect to its INPUT x: 1 / cosh(x)^2 (= sech^2 x).
// NOTE(review): train() applies this.derivative to layer *outputs*. That is
// correct for dSigmoid (which is written in terms of the output), but this
// form expects the raw pre-activation input; for an output y = tanh(x) the
// matching form would be 1 - y*y. Confirm intent before training with tanh.
// @param {number} x - raw (pre-activation) input
// @returns {number} d/dx tanh(x)
NeuralNetwork.dtanh = function(x) {
  // Math.cosh replaces the original `pow(Math.cosh(x), 2)`, which relied
  // on p5.js's global pow() and threw a ReferenceError outside a p5 sketch.
  var c = Math.cosh(x);
  return 1 / (c * c);
}
// Weight-mutation helper for evolution: with 10% probability, nudge the
// value by a small Gaussian offset; otherwise return it unchanged.
// Relies on the p5.js globals random() and randomGaussian().
// @param {number} x - a single weight value
// @returns {number} the (possibly mutated) weight
function mutate(x) {
  // 90% of the time the weight passes through untouched.
  if (random(1) >= 0.1) {
    return x;
  }
  // Gaussian offset with standard deviation 0.5.
  // (A uniform random(-0.1, 0.1) offset is another option here.)
  return x + randomGaussian() * 0.5;
}
// Neural Network constructor function.
// Builds a 3-layer network (input, hidden, output) — or, when called with an
// existing NeuralNetwork as the sole argument, a deep copy of that network.
// @param {number|NeuralNetwork} inputnodes - input node count, or a network to copy
// @param {number} hiddennodes - hidden node count
// @param {number} outputnodes - output node count
// @param {number} [learning_rate=0.1] - step size for train()
// @param {string} [activation] - 'tanh' selects tanh; anything else -> sigmoid
function NeuralNetwork(inputnodes, hiddennodes, outputnodes, learning_rate, activation) {
  // If it's a copy of another NN
  if (arguments[0] instanceof NeuralNetwork) {
    var nn = arguments[0];
    this.inodes = nn.inodes;
    this.hnodes = nn.hnodes;
    this.onodes = nn.onodes;
    // Deep-copy the weight matrices so the clone evolves independently.
    this.wih = nn.wih.copy();
    this.who = nn.who.copy();
    this.activation = nn.activation;
    this.derivative = nn.derivative;
    // BUG FIX: was `this.lr = this.lr`, a self-assignment that left the
    // copy's learning rate undefined; copy it from the source network.
    this.lr = nn.lr;
  } else {
    // Number of nodes in layer (input, hidden, output)
    // This network is limited to 3 layers
    this.inodes = inputnodes;
    this.hnodes = hiddennodes;
    this.onodes = outputnodes;
    // These are the weight matrices
    // wih: weights from input to hidden
    // who: weights from hidden to output
    // weights inside the arrays are w_i_j
    // where link is from node i to node j in the next layer
    // Matrix is rows X columns
    this.wih = new Matrix(this.hnodes, this.inodes);
    this.who = new Matrix(this.onodes, this.hnodes);
    // Start with random values
    this.wih.randomize();
    this.who.randomize();
    // Default learning rate of 0.1 (note: `||` also coerces an explicit 0,
    // but a zero learning rate would make train() a no-op anyway).
    this.lr = learning_rate || 0.1;
    // Activation Function
    if (activation == 'tanh') {
      this.activation = NeuralNetwork.tanh;
      this.derivative = NeuralNetwork.dtanh;
    } else {
      this.activation = NeuralNetwork.sigmoid;
      this.derivative = NeuralNetwork.dSigmoid;
    }
  }
}
// Produce an independent clone of this network (weights deep-copied
// via the constructor's copy branch).
// @returns {NeuralNetwork} the clone
NeuralNetwork.prototype.copy = function() {
  var clone = new NeuralNetwork(this);
  return clone;
}
// Randomly perturb this network's weights for evolution by mapping the
// standalone mutate() helper over both weight matrices.
NeuralNetwork.prototype.mutate = function() {
  var mutated_wih = Matrix.map(this.wih, mutate);
  var mutated_who = Matrix.map(this.who, mutate);
  this.wih = mutated_wih;
  this.who = mutated_who;
}
// Train the network with one (inputs, targets) pair: a forward pass
// followed by one step of backpropagation / gradient descent.
// @param {Array} inputs_array - input values (expected length: this.inodes)
// @param {Array} targets_array - desired outputs (expected length: this.onodes)
// Mutates this.wih and this.who in place; returns nothing.
NeuralNetwork.prototype.train = function(inputs_array, targets_array) {
// Turn input and target arrays into column matrices
var inputs = Matrix.fromArray(inputs_array);
var targets = Matrix.fromArray(targets_array);
// The input to the hidden layer is the weights (wih) multiplied by inputs
var hidden_inputs = Matrix.dot(this.wih, inputs);
// The outputs of the hidden layer pass through the activation function
var hidden_outputs = Matrix.map(hidden_inputs, this.activation);
// The input to the output layer is the weights (who) multiplied by hidden layer
var output_inputs = Matrix.dot(this.who, hidden_outputs);
// The output of the network passes through the activation function
var outputs = Matrix.map(output_inputs, this.activation);
// Error is TARGET - OUTPUT
var output_errors = Matrix.subtract(targets, outputs);
// Now we are starting back propagation!
// Transpose hidden <-> output weights
var whoT = this.who.transpose();
// Hidden errors: output error propagated backward through the weights (who)
var hidden_errors = Matrix.dot(whoT, output_errors)
// Calculate the gradient. NOTE(review): the derivative is applied to the
// *activated outputs*, which matches dSigmoid's output-form derivative;
// dtanh is written in terms of the raw input, so confirm before using tanh.
var gradient_output = Matrix.map(outputs, this.derivative);
// Weight by errors and learning rate (Matrix.multiply mutates in place)
gradient_output.multiply(output_errors);
gradient_output.multiply(this.lr);
// Gradients for the hidden layer, more back propagation!
var gradient_hidden = Matrix.map(hidden_outputs, this.derivative);
// Weight by errors and learning rate
gradient_hidden.multiply(hidden_errors);
gradient_hidden.multiply(this.lr);
// Change in weights from HIDDEN --> OUTPUT: gradient x transposed activations
var hidden_outputs_T = hidden_outputs.transpose();
var deltaW_output = Matrix.dot(gradient_output, hidden_outputs_T);
this.who.add(deltaW_output);
// Change in weights from INPUT --> HIDDEN
var inputs_T = inputs.transpose();
var deltaW_hidden = Matrix.dot(gradient_hidden, inputs_T);
this.wih.add(deltaW_hidden);
}
// Query the network: a forward pass only, no learning.
// @param {Array} inputs_array - input values (expected length: this.inodes)
// @returns {Array} the network's outputs as a plain array
NeuralNetwork.prototype.query = function(inputs_array) {
  // Column matrix from the raw input values.
  var input_matrix = Matrix.fromArray(inputs_array);
  // Input layer -> hidden layer: weight the inputs, then activate.
  var hidden_activations = Matrix.map(Matrix.dot(this.wih, input_matrix), this.activation);
  // Hidden layer -> output layer: weight the hidden values, then activate.
  var output_activations = Matrix.map(Matrix.dot(this.who, hidden_activations), this.activation);
  // Hand back a plain array rather than a Matrix.
  return output_activations.toArray();
}