Olexiy Troschij #15

Open
wants to merge 9 commits into main
2,251 changes: 2,251 additions & 0 deletions tasks/OlexiyTr_code/task_1/House pricing/House_Prising_Olexij_Troschij.ipynb

Large diffs are not rendered by default.

143 changes: 143 additions & 0 deletions tasks/OlexiyTr_code/task_1/Titanic/Olexij_Trsochij_Titanic.ipynb

Large diffs are not rendered by default.

332 changes: 332 additions & 0 deletions tasks/OlexiyTr_code/task_2/Face mask/Image_Detection.ipynb

Large diffs are not rendered by default.

955 changes: 955 additions & 0 deletions tasks/OlexiyTr_code/task_3/Transfer_Learning.ipynb

Large diffs are not rendered by default.

141 changes: 141 additions & 0 deletions tasks/OlexiyTr_code/task_3/transfer_learning.py
@@ -0,0 +1,141 @@
# -*- coding: utf-8 -*-
"""Transfer Learning

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/11W4P9OMoLafNSM5TWLVsR095Bgxe7t2e
"""

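# Mount Google Drive; image_dataset_from_directory below infers class labels
# from the subfolder names under PATH_TO_DATA (assumed: one subfolder per class).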
from google.colab import drive
drive.mount('/content/gdrive')

PATH_TO_DATA = "/content/gdrive/MyDrive/data"
import os
print(os.listdir(PATH_TO_DATA))  # notebook equivalent: !ls {PATH_TO_DATA}

import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory

BATCH_SIZE = 32
IMG_SIZE = (160,160)

# 70/30 train/validation split; the matching seed keeps the two subsets disjoint.
train_dataset = image_dataset_from_directory(PATH_TO_DATA,
                                             validation_split=0.3,
                                             subset="training",
                                             seed=42,
                                             image_size=IMG_SIZE,
                                             batch_size=BATCH_SIZE)

validation_dataset = image_dataset_from_directory(PATH_TO_DATA,
                                                  validation_split=0.3,
                                                  shuffle=True,
                                                  subset="validation",
                                                  seed=42,
                                                  batch_size=BATCH_SIZE,
                                                  image_size=IMG_SIZE)

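# Carve a fifth of the validation batches off as a held-out test set.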
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)

print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))

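# Prefetching overlaps input preprocessing with model execution on the accelerator.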
AUTOTUNE = tf.data.AUTOTUNE

train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)

# Light augmentation; these layers are active only when the model is called
# with training=True, so they do not distort validation or test images.
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.2),
])

preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input

# Alternative rescaling to MobileNetV2's expected [-1, 1] input range; defined
# but not used below, since preprocess_input already performs this scaling.
rescale = tf.keras.layers.Rescaling(1./127.5, offset=-1)

IMG_SHAPE = IMG_SIZE + (3,)
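# MobileNetV2 pretrained on ImageNet, without its classification head
# (include_top=False), serves as the convolutional feature extractor.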
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')

image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)

# Freeze the convolutional base so only the newly added head is trained.
base_model.trainable = False

base_model.summary()

global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)

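# A single Dense unit emits one raw logit per image; the loss is configured
# with from_logits=True below to match.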
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)

inputs = tf.keras.Input(shape=(160, 160, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
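# training=False runs the frozen base in inference mode, keeping its
# BatchNorm statistics fixed.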
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)

base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.summary()

print(len(model.trainable_variables))  # just the Dense head's kernel and bias

initial_epochs = 5

loss0, accuracy0 = model.evaluate(validation_dataset)

print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))

history = model.fit(train_dataset,
epochs=initial_epochs,
validation_data=validation_dataset)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
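
# The test split carved out earlier is never evaluated in the original script;
# a minimal held-out check, reusing the model and test_dataset defined above:
test_loss, test_accuracy = model.evaluate(test_dataset)
print("test loss: {:.2f}".format(test_loss))
print("test accuracy: {:.2f}".format(test_accuracy))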


492 changes: 0 additions & 492 deletions tasks/task_1/Classification_example_with_Iris_dataset.ipynb

This file was deleted.

193 changes: 0 additions & 193 deletions tasks/task_1/Classification_example_with_Iris_dataset.py

This file was deleted.

3 changes: 0 additions & 3 deletions tasks/task_1/README.md

This file was deleted.