# -*- coding: utf-8 -*-
"""catPred.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16rijz2clJ5U9dGNsgXjabytwzEpVtiJJ
CATARACT PREDICTION
Cataract is a condition that causes partial, and often reversible, blindness worldwide, most commonly in elderly people. AI is an emerging approach in technology that focuses on automation and faster results. Our motive is to build a classification model that predicts correctly for as many patients as possible and helps detect cataract at an early stage.
### 1. Data acquisition
The dataset for this project was collected from Kaggle (https://www.kaggle.com/jr2ngb/cataractdataset).
It consists of 100 normal fundus images and 100 cataract fundus images.
"""
# importing dependencies
import os
from pathlib import Path
from tensorflow.keras.preprocessing import image  # use tf.keras throughout, not standalone keras
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
"""Resizing all the images into a fix size (256x256 pixel)"""
p = Path("dataset/")
dirs = p.glob("*")
image_dataset = []
labels = []
labelsToImg = {0: "1_normal", 1: "2_cataract"}
labelDict = {"1_normal": 0, "2_cataract": 1}
for folder_dir in dirs:
    label = folder_dir.name  # class label is the folder name (portable, unlike splitting on "\\")
    count = 0
    for img_path in folder_dir.glob("*.png"):
        img = image.load_img(img_path, target_size=(256, 256))  # resizing to 256x256 pixels
        img_array = image.img_to_array(img)
        image_dataset.append(img_array)
        labels.append(labelDict[label])
        count += 1
    print(count)  # number of images loaded from this folder
print(len(image_dataset))
print(len(labels))
X = np.array(image_dataset)
Y = np.array(labels)
print(X.shape,Y.shape)
X,Y = shuffle(X,Y,random_state=2)
# Normalizing the data
X = X/255.0
"""### Visualizing the Normalised Images"""
def show(img, label):
    plt.title(labelsToImg[label])
    plt.imshow(img)
    plt.axis("off")
    plt.show()

for i in range(1, 10):
    r = np.random.randint(50)  # sample a random index from the first 50 images
    show(X[r], Y[r])
"""### Training and Validation split"""
"""
Splitting our dataset 20% for testing on unseen data and remaining 80%
for training and validation.
"""
split = int(X.shape[0] * 0.8)
X_ = np.array(X)
Y_ = np.array(Y)
# Training set
X_Train = X_[:split, :]
Y_Train = np.asarray(Y_[:split]).astype('float32').reshape((-1, 1))
# Test set
X_Test = X_[split:, :]
Y_Test = np.asarray(Y_[split:]).astype('float32').reshape((-1, 1))
print("Shape of X_Train : ", X_Train.shape)
print("Shape of Y_Train : ", Y_Train.shape)
print("Shape of X_Test : ", X_Test.shape)
print("Shape of Y_Test : ", Y_Test.shape)
# create a sequential model
model = Sequential()
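# Note: the stack below mirrors the classic AlexNet layout (conv filters
# 96/256/384/384/256, 3x3 max-pooling with stride 2, two 4096-unit dense
# layers), adapted to 256x256 inputs and a single sigmoid output for
# binary classification.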
# 1st conv layer
model.add(Conv2D(filters=96,input_shape=(256,256,3), kernel_size=(11,11), strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# the first layer has 96 filters and input shape 256x256x3;
# kernel size 11x11, stride 4x4, ReLU activation
# MaxPooling
model.add(MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid'))
# 2nd conv layer
model.add(Conv2D(filters=256,kernel_size=(5,5),strides=(1,1),padding='valid'))
model.add(Activation('relu'))
# MaxPooling
model.add(MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid'))
# 3rd conv layer
model.add(Conv2D(filters=384,kernel_size=(3,3),strides=(1,1),padding='valid'))
model.add(Activation('relu'))
# 4th conv layer
model.add(Conv2D(filters=384,kernel_size=(3,3),strides=(1,1),padding='valid'))
model.add(Activation('relu'))
# 5th conv layer
model.add(Conv2D(filters=256,kernel_size=(3,3),strides=(1,1),padding='valid'))
model.add(Activation('relu'))
# MaxPooling
model.add(MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid'))
# flatten
model.add(Flatten())
# 1st FC layer with 4096 neurons
model.add(Dense(4096))  # no input_shape needed here; Keras infers it from Flatten
model.add(Activation('relu'))
# adding dropout to regularize overfitting
#model.add(Dropout(0.4))
# 2nd FC layer
model.add(Dense(4096))
model.add(Activation('relu'))
# adding dropout to regularize overfitting
#model.add(Dropout(0.4))
# Output layer
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
model.fit(X_Train,Y_Train,epochs=50,batch_size=32)
"""highest acc - 95.63% train set 50th epoch using rmsprop optimizer
"""
model.evaluate(X_Test,Y_Test)
"""rms prop - test - 80% acc"""