Test133.py
# -*- coding: utf-8 -*-
# Dataset: https://archive.ics.uci.edu/ml/datasets/Iris
# Data files: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/
'''
Softmax (multinomial logistic) regression on the Iris dataset,
using the old TensorFlow 0.x queue-based input pipeline.
'''
import os
import tensorflow as tf
# This time the weights form a matrix, not a column vector:
# one "weight vector" per class (4 features x 3 classes).
W = tf.Variable(tf.zeros([4, 3]), name="weights")
# Likewise, one bias per class.
b = tf.Variable(tf.zeros([3]), name="bias")
def combine_inputs(X):
    return tf.matmul(X, W) + b

def inference(X):
    return tf.nn.softmax(combine_inputs(X))
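# inference() maps a batch of feature rows to a batch of per-class
# probability distributions (each output row sums to 1).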
def loss(X, Y):
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=combine_inputs(X), labels=Y))
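# Note: the loss consumes the raw logits from combine_inputs(), not the
# softmax output of inference(); sparse_softmax_cross_entropy_with_logits
# applies the softmax internally, which is more numerically stable than
# computing log(softmax(...)) by hand. Y holds integer class indices.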
def read_csv(batch_size, file_name, record_defaults):
    filename_queue = tf.train.string_input_producer(
        [os.path.join(os.path.dirname(__file__), "133", "iris", file_name)])
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)
    # decode_csv converts a string Tensor (one text line) into a tuple of
    # column tensors, using record_defaults both as fill-in values and to
    # set the data type of each column.
    decoded = tf.decode_csv(value, record_defaults=record_defaults)
    # shuffle_batch actually reads the file and collects "batch_size"
    # rows into a single tensor per column.
    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)
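# capacity bounds the prefetch queue; min_after_dequeue is how many
# elements must remain queued after each dequeue, which controls how
# thoroughly examples get shuffled. These readers are driven by queue
# runners, started below with tf.train.start_queue_runners().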
def inputs():
    sepal_length, sepal_width, petal_length, petal_width, label = \
        read_csv(100, "iris.data", [[0.0], [0.0], [0.0], [0.0], [""]])
    # Convert class names to a 0-based class index.
    label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.pack([
        tf.equal(label, ["Iris-setosa"]),
        tf.equal(label, ["Iris-versicolor"]),
        tf.equal(label, ["Iris-virginica"])
    ])), 0))
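    # Each tf.equal() yields a boolean vector over the batch; pack() stacks
    # them into a 3 x batch_size matrix, and argmax over dimension 0 picks
    # the row (i.e. class index) whose name matched each example.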
    # Pack all the features we care about into a single matrix, then
    # transpose so each row is one example and each column one feature.
    features = tf.transpose(tf.pack([sepal_length, sepal_width, petal_length, petal_width]))
    return features, label_number
def train(total_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
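# minimize() builds both the gradient computation and the parameter
# update into a single op, so running train_op performs one SGD step.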
def evaluate(sess, X, Y):
    predicted = tf.cast(tf.argmax(inference(X), 1), tf.int32)
    print sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32)))
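# Caveat: this measures accuracy on a freshly drawn *training* batch;
# with no held-out split it only sanity-checks that the model learned.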
# Launch the graph in a session and set up the boilerplate.
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    X, Y = inputs()
    total_loss = loss(X, Y)
    train_op = train(total_loss)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Actual training loop.
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        # For debugging and learning purposes, watch the loss decrease
        # over the training steps.
        if step % 10 == 0:
            print "loss: ", sess.run([total_loss])
    evaluate(sess, X, Y)
    coord.request_stop()
    coord.join(threads)
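# To run this script (against the TF 0.x-era API it uses), download
# iris.data from the UCI URL in the header into a 133/iris/ directory
# next to this file, then: python Test133.py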