# ch9_linear_regression.py
# Linear regression on the California housing dataset with TensorFlow 1.x:
# first via the closed-form normal equation, then via batch gradient descent.
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing

# Load the California housing dataset: m samples, n features.
housing = fetch_california_housing()
m, n = housing.data.shape
print(m, n)

# Prepend a bias column of ones so the intercept is learned as theta[0].
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]
print("Linear Regression Example")
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1,1), dtype=tf.float32, name="y")
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT,X)), XT), y)
with tf.Session() as sess:
theta_value = theta.eval()
print(theta_value)
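
# Sanity check (a minimal sketch, not in the original script): the same
# least-squares problem can be solved with NumPy, which avoids forming
# (X^T X)^{-1} explicitly and is numerically more stable. The result should
# closely match theta_value above.
theta_numpy, residuals, rank, sv = np.linalg.lstsq(
    housing_data_plus_bias, housing.target.reshape(-1, 1), rcond=None)
print(theta_numpy)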
from sklearn.preprocessing import StandardScaler

# Standardize the raw features (zero mean, unit variance) so gradient descent
# converges well. Note: fit the scaler on housing.data, not on
# housing_data_plus_bias; standardizing the bias column of ones would turn it
# into a column of zeros and silently remove the intercept.
scaler = StandardScaler()
print(scaler.fit(housing.data))
print(scaler.mean_)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaler.transform(housing.data)]
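
# Quick check (added here, not in the original): each feature column should now
# have mean ~0 and std ~1, while the bias column stays all ones (std 0).
print(scaled_housing_data_plus_bias.mean(axis=0))
print(scaled_housing_data_plus_bias.std(axis=0))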
print("Gradient Descent Example")
n_epochs = 1000
learning_rate = 0.01
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
# Several equivalent ways to build the training step:
# 1. Gradient calculated manually from the derivative of the MSE:
#    gradients = 2 / m * tf.matmul(tf.transpose(X), error)
#    training_op = tf.assign(theta, theta - learning_rate * gradients)
# 2. Gradient calculated with TF's autodiff feature (more convenient and
#    less error-prone than deriving it by hand):
#    gradients = tf.gradients(mse, [theta])[0]
#    training_op = tf.assign(theta, theta - learning_rate * gradients)
# 3. TF's momentum optimizer, which typically converges faster:
#    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
#    training_op = optimizer.minimize(mse)
# Here we use TF's plain gradient descent optimizer:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
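
# Side check (a minimal sketch, not part of the original script): the manual
# gradient formula and TF's autodiff should agree. These are just extra graph
# nodes; e.g. print(gradient_gap.eval()) right after sess.run(init) below
# should show a value near zero.
manual_gradients = 2 / m * tf.matmul(tf.transpose(X), error)
auto_gradients = tf.gradients(mse, [theta])[0]
gradient_gap = tf.reduce_max(tf.abs(manual_gradients - auto_gradients))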
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print(best_theta)
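
# Usage sketch (added, not in the original): best_theta is a plain NumPy
# array, so predictions for the scaled, bias-augmented inputs are just a
# matrix-vector product.
sample_predictions = scaled_housing_data_plus_bias[:5].dot(best_theta)
print("predictions:", sample_predictions.ravel())
print("targets:    ", housing.target[:5])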