model_training.py
import os

import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
#from google.colab.patches import cv2_imshow


def prepare_training_data(dataset, shuffle=False, augment=False):
    # resizing and normalization of dataset
    resizing_layer = tf.keras.layers.Resizing(512, 512)
    dataset = dataset.map(lambda x, y: (resizing_layer(x), y))
    normalization_layer = tf.keras.layers.Rescaling(1. / 255)
    dataset = dataset.map(lambda x, y: (normalization_layer(x), y))
    if shuffle:
        dataset = dataset.shuffle(10)
    # data augmentation
    if augment:
        data_augmentation = tf.keras.Sequential([
            tf.keras.layers.RandomFlip("horizontal"),
            tf.keras.layers.RandomRotation(0.2)
        ])
        dataset = dataset.map(lambda x, y: (data_augmentation(x, training=True), y),
                              num_parallel_calls=tf.data.AUTOTUNE)
    return dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
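

# Quick sanity check for prepare_training_data (illustrative sketch, not part of the
# original script): a synthetic batch should come out resized to 512x512 with pixel
# values rescaled into [0, 1]. Call it manually if you want to verify the pipeline.
def _check_preprocessing():
    dummy_images = tf.random.uniform((4, 600, 600, 3), maxval=255.0)
    dummy_labels = tf.zeros((4,), dtype=tf.int32)
    dummy_dataset = tf.data.Dataset.from_tensor_slices((dummy_images, dummy_labels)).batch(2)
    prepared = prepare_training_data(dummy_dataset)
    images, _ = next(iter(prepared))
    assert tuple(images.shape[1:3]) == (512, 512)
    assert float(tf.reduce_max(images)) <= 1.0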


# loading picture data
def load_training_data(animal_type):
    training_data_path = "../training_data_without_background/"
    batch_size = 1
    pic_height = 512
    pic_width = 512
    training_dataset = tf.keras.utils.image_dataset_from_directory(
        training_data_path + animal_type,
        validation_split=0.25,
        subset="training",
        seed=123,
        image_size=(pic_height, pic_width),
        batch_size=batch_size
    )
    validation_dataset = tf.keras.utils.image_dataset_from_directory(
        training_data_path + animal_type,
        validation_split=0.25,
        subset="validation",
        seed=123,
        image_size=(pic_height, pic_width),
        batch_size=batch_size
    )
    class_names = training_dataset.class_names
    training_dataset = prepare_training_data(training_dataset, shuffle=True, augment=True)
    # the validation split is only resized and rescaled; shuffling and augmentation
    # are reserved for the training split so validation metrics stay comparable
    validation_dataset = prepare_training_data(validation_dataset)
    return training_dataset, validation_dataset, class_names
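

# Note on the expected input layout for load_training_data:
# tf.keras.utils.image_dataset_from_directory infers class labels from one
# subdirectory per class under the chosen animal type, e.g. (folder names below
# are only an illustration, not paths from the original project)
#   ../training_data_without_background/dog/husky/*.jpg
#   ../training_data_without_background/dog/poodle/*.jpg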


# training the model
def train_model(animal_type):
    # loading dataset
    training_dataset, validation_dataset, class_names = load_training_data(animal_type)
    number_of_classes = len(class_names)
    # simple CNN: ReLU in the convolutional and hidden dense layers, softmax only
    # on the output layer
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
        tf.keras.layers.MaxPool2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'),
        tf.keras.layers.MaxPool2D(2, 2),
        tf.keras.layers.Conv2D(16, (3, 3), padding='same', activation='relu'),
        tf.keras.layers.MaxPool2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(number_of_classes, activation='softmax')
    ])
    # the output layer already applies softmax, so the loss receives probabilities
    # rather than logits
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy']
                  )
    epochs_number = 10
    history = model.fit(training_dataset, validation_data=validation_dataset, epochs=epochs_number)

    # plot training/validation accuracy and loss over the epochs
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plt.figure(figsize=(8, 8))
    plt.subplot(1, 2, 1)
    plt.plot(range(epochs_number), acc, label='Training Accuracy')
    plt.plot(range(epochs_number), val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training Accuracy')
    plt.subplot(1, 2, 2)
    plt.plot(range(epochs_number), loss, label='Training Loss')
    plt.plot(range(epochs_number), val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training Loss')
    plt.show()
    return model, class_names


#train_model("dog")
#load_training_data("dog")
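

# Example usage (illustrative sketch, not part of the original script): train a
# classifier for one animal type and save it in the native Keras format. The
# "saved_models" directory and file name are assumptions for this example; saving
# with a ".keras" extension requires a reasonably recent TensorFlow/Keras.
if __name__ == "__main__":
    trained_model, classes = train_model("dog")
    print("Detected classes:", classes)
    os.makedirs("saved_models", exist_ok=True)
    trained_model.save(os.path.join("saved_models", "dog_classifier.keras"))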