#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""simple_nn_regression.py

A simple neural network regression example using PyTorch.

Automatically generated by Colaboratory; the original notebook is located at
https://colab.research.google.com/drive/17qsXq5-kUOoqRnIju97T9fu2JSrldWbj
"""
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# Input size, hidden layer size, output size and batch size, respectively
n_in, n_h, n_out, batch_size = 100, 500, 1, 10000
# Scale factor applied to the standard-normal inputs
val_range = 100.0
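# For reproducible runs (an addition, not in the original script), fix the seed
torch.manual_seed(0)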
# (Earlier drafts loaded the Boston housing data into a pandas DataFrame and
# split it with sklearn's train_test_split; this version uses synthetic data.)
# Create synthetic input and target tensors. The NumPy round-trips in the
# original (torch.from_numpy(... .numpy())) were unnecessary; plain tensor
# ops give the same result.
x = val_range * torch.randn(batch_size, n_in)
# The original target x*x has shape (batch_size, n_in), which silently
# broadcasts against the model's (batch_size, 1) output inside MSELoss;
# reduce it to one value per sample (here, the mean square) so shapes match.
y = x.pow(2).mean(dim=1, keepdim=True)
print(x.shape, y.shape)
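# At this scale (|x| ~ 100, y ~ 1e4), plain SGD at lr=0.01 diverges. The
# standardization below is an addition, not in the original script; the
# statistics are kept so test data can be transformed the same way.
x_mean, x_std = x.mean(), x.std()
y_mean, y_std = y.mean(), y.std()
x = (x - x_mean) / x_std
y = (y - y_mean) / y_std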
# Create the model. The original ended with nn.Sigmoid(), but a sigmoid
# bounds the output to (0, 1) and cannot reach targets outside that range,
# so the output layer is left linear for regression.
model = nn.Sequential(nn.Linear(n_in, n_h),
                      nn.ReLU(),
                      nn.Linear(n_h, n_out))
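# For reference (an equivalent formulation, not in the original): the same
# network written as an explicit nn.Module subclass, which scales better once
# the forward pass needs more than a linear chain of layers.
class SimpleRegressor(nn.Module):
    def __init__(self, n_in, n_h, n_out):
        super().__init__()
        self.hidden = nn.Linear(n_in, n_h)
        self.act = nn.ReLU()
        self.out = nn.Linear(n_h, n_out)

    def forward(self, x):
        return self.out(self.act(self.hidden(x)))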
# Construct the loss function
criterion = torch.nn.MSELoss()
# Construct the optimizer (Stochastic Gradient Descent in this case)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
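# Quick sanity check (an addition, not in the original): before training, the
# loss should sit near the variance of the standardized targets (about 1.0),
# since an untrained network's outputs are small and MSELoss averages squared
# errors over all elements.
with torch.no_grad():
    print('initial loss: %.3f' % criterion(model(x), y).item())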
# Gradient descent training loop
epochs = 150
for epoch in range(epochs):
    # Forward pass: compute predicted y by passing x to the model
    y_pred = model(x)
    # Compute and print the loss
    loss = criterion(y_pred, y)
    print('epoch: ', epoch, ' loss: ', loss.item())
    # Zero the gradients, perform a backward pass (backpropagation),
    # and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
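# Persisting the trained weights (an addition, not in the original script)
# follows the standard state_dict pattern; the filename is illustrative.
torch.save(model.state_dict(), 'simple_nn_regression.pt')
# They can later be restored into a freshly constructed model:
# model.load_state_dict(torch.load('simple_nn_regression.pt'))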
# Evaluate on fresh inputs, standardized like the training data (the original
# drew unscaled inputs, and its first x = x.data... line was dead code)
model.eval()
x_test = val_range * torch.randn(10, n_in)
y_true = (x_test.pow(2).mean(dim=1, keepdim=True) - y_mean) / y_std
x_test = (x_test - x_mean) / x_std
with torch.no_grad():
    y_pred = model(x_test)
# x is 100-dimensional, so plot predicted vs. true values rather than
# plt.plot(x, y), whose shapes (10, 100) and (10, 1) do not match
plt.scatter(y_true.numpy(), y_pred.numpy())
plt.show()