From 539c34f01951ac68c843f1a1ede78d99612dafeb Mon Sep 17 00:00:00 2001
From: David Refaeli <32735496+MaverickMeerkat@users.noreply.github.com>
Date: Wed, 18 Sep 2019 18:34:45 +0300
Subject: [PATCH 1/2] Add an activation function to each layer

The network doesn't make much sense without one - stacking purely
linear layers only composes into a single affine transformation, so
the whole NN is just an elaborate linear combination.
---
 notebooks/3_NeuralNetworks/neural_network_raw.ipynb | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/notebooks/3_NeuralNetworks/neural_network_raw.ipynb b/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
index 6d9dbd24..a828f5d5 100644
--- a/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
+++ b/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
@@ -117,10 +117,13 @@
     "def neural_net(x):\n",
     "    # Hidden fully connected layer with 256 neurons\n",
     "    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n",
+    "    layer_1 = tf.nn.sigmoid(layer_1)\n",
     "    # Hidden fully connected layer with 256 neurons\n",
     "    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n",
+    "    layer_2 = tf.nn.sigmoid(layer_2)\n",
     "    # Output fully connected layer with a neuron for each class\n",
     "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
+    "    out_layer = tf.nn.softmax(out_layer)\n",
     "    return out_layer"
    ]
   },

From 4758f22a16f651871f9b625cb769e8f470474bea Mon Sep 17 00:00:00 2001
From: David Refaeli <32735496+MaverickMeerkat@users.noreply.github.com>
Date: Mon, 7 Oct 2019 17:35:05 +0300
Subject: [PATCH 2/2] Remove the softmax activation from the output layer

No softmax activation is needed on the output layer: the loss uses
softmax_cross_entropy_with_logits, which requires raw logits, i.e.
the values before the softmax, and applies softmax internally.
---
 notebooks/3_NeuralNetworks/neural_network_raw.ipynb | 1 -
 1 file changed, 1 deletion(-)

diff --git a/notebooks/3_NeuralNetworks/neural_network_raw.ipynb b/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
index a828f5d5..d7a50ecd 100644
--- a/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
+++ b/notebooks/3_NeuralNetworks/neural_network_raw.ipynb
@@ -123,7 +123,6 @@
     "    layer_2 = tf.nn.sigmoid(layer_2)\n",
     "    # Output fully connected layer with a neuron for each class\n",
     "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
-    "    out_layer = tf.nn.softmax(out_layer)\n",
     "    return out_layer"
    ]
   },
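
A quick way to see why the first patch matters: without a nonlinearity, stacked
dense layers collapse algebraically into one affine map, since
(xW1 + b1)W2 + b2 = x(W1W2) + (b1W2 + b2). A minimal NumPy sketch, illustrative
only and not part of the patch; the names are made up here and the shapes
loosely follow the notebook's 784/256/10 dimensions:

    # Two linear layers with no activation are exactly equivalent
    # to a single linear layer.
    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(size=(5, 784))              # batch of 5 flattened inputs
    W1, b1 = rng.normal(size=(784, 256)), rng.normal(size=256)
    W2, b2 = rng.normal(size=(256, 10)), rng.normal(size=10)

    two_layers = (x @ W1 + b1) @ W2 + b2       # "deep", but purely linear
    W, b = W1 @ W2, b1 @ W2 + b2               # the collapsed single layer
    one_layer = x @ W + b

    print(np.allclose(two_layers, one_layer))  # True

Inserting tf.nn.sigmoid between the layers breaks this equivalence, which is
what gives the extra layers their expressive power.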
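
Likewise for the second patch: softmax_cross_entropy_with_logits applies
softmax internally, so feeding it already-softmaxed outputs would apply
softmax twice and silently compute a different loss. A small sketch, assuming
the TF 1.x API the notebook uses, with made-up example values:

    # The loss op expects raw logits, not probabilities.
    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])
    labels = tf.constant([[1.0, 0.0, 0.0]])

    right = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    wrong = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.nn.softmax(logits), labels=labels)  # softmax applied twice

    with tf.Session() as sess:
        print(sess.run([right, wrong]))  # roughly [0.417] vs [0.802]

If probabilities are needed for prediction or evaluation, tf.nn.softmax can
still be applied to the returned logits outside the loss computation.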