Commit

new version 0.83
AutoViML committed Feb 22, 2023
1 parent 697b21b commit e960c92
Showing 35 changed files with 11,206 additions and 1 deletion.
52 changes: 52 additions & 0 deletions build/lib/deep_autoviml/__init__.py
@@ -0,0 +1,52 @@
############################################################################################
#Copyright 2021 Google LLC

#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
# -*- coding: utf-8 -*-
################################################################################
# deep_auto_viml - build and test multiple Tensorflow 2.0 models and pipelines
# Python v3.6+ tensorflow v2.4.1+
# Created by Ram Seshadri
# Licensed under Apache License v2
################################################################################
# Version
from .__version__ import __version__
__all__ = ['data_load', 'models', 'modeling', 'preprocessing', 'utilities']
import pdb

from .deep_autoviml import fit
from deep_autoviml.modeling.predict_model import load_test_data, predict, predict_images, predict_text
from deep_autoviml.utilities.utilities import print_one_row_from_tf_dataset, print_one_row_from_tf_label
from deep_autoviml.utilities.utilities import print_classification_metrics, print_regression_model_stats
from deep_autoviml.utilities.utilities import print_classification_model_stats, plot_history, plot_classification_results
################################################################################
if __name__ == "__main__":
    module_type = 'Running'
else:
    module_type = 'Imported'
version_number = __version__
print("""
%s deep_auto_viml. version=%s
from deep_autoviml import deep_autoviml as deepauto
-------------------
model, cat_vocab_dict = deepauto.fit(train, target, keras_model_type="fast",
        project_name="deep_autoviml", keras_options=keras_options,
        model_options=model_options, save_model_flag=True, use_my_model='',
        model_use_case='', verbose=0)
predictions = deepauto.predict(model, project_name, test_dataset=test,
        keras_model_type=keras_model_type,
        cat_vocab_dict=cat_vocab_dict)
""" %(module_type, version_number))
################################################################################
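
A minimal end-to-end sketch of the API advertised in the banner above: the CSV file names, target column, and empty option dictionaries are illustrative assumptions, while the function names and parameters are taken from the printed usage.

import pandas as pd
from deep_autoviml import deep_autoviml as deepauto

# Hypothetical input files and target column -- substitute your own.
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
target = "label"

keras_options = {}    # assumed to fall back to library defaults when left empty
model_options = {}

model, cat_vocab_dict = deepauto.fit(train, target, keras_model_type="fast",
                                     project_name="deep_autoviml", keras_options=keras_options,
                                     model_options=model_options, save_model_flag=True,
                                     use_my_model='', model_use_case='', verbose=0)

predictions = deepauto.predict(model, "deep_autoviml", test_dataset=test,
                               keras_model_type="fast", cat_vocab_dict=cat_vocab_dict)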
25 changes: 25 additions & 0 deletions build/lib/deep_autoviml/__version__.py
@@ -0,0 +1,25 @@
############################################################################################
#Copyright 2021 Google LLC

#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
# -*- coding: utf-8 -*-
"""Specifies the version of the deep_autoviml package."""

__title__ = "deep_autoviml"
__author__ = "Ram Seshadri"
__description__ = "deep_autoviml - build and test multiple Tensorflow 2.0 models and pipelines"
__url__ = "https://github.com/Auto_ViML/deep_autoviml.git"
__version__ = "0.0.82"
__license__ = "Apache License 2.0"
__copyright__ = "2020-21 Google"
1,219 changes: 1,219 additions & 0 deletions build/lib/deep_autoviml/data_load/classify_features.py

1,326 changes: 1,326 additions & 0 deletions build/lib/deep_autoviml/data_load/extract.py

524 changes: 524 additions & 0 deletions build/lib/deep_autoviml/deep_autoviml.py

487 changes: 487 additions & 0 deletions build/lib/deep_autoviml/modeling/create_model.py

139 changes: 139 additions & 0 deletions build/lib/deep_autoviml/modeling/one_cycle.py
@@ -0,0 +1,139 @@
############################################################################################
#Copyright 2021 Google LLC

#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import logging

logging.getLogger('tensorflow').setLevel(logging.ERROR)

from tensorflow.keras.callbacks import Callback
#########################################################################################################
###### One Cycle is a Super-Convergence technique developed by Leslie Smith: https://arxiv.org/abs/1708.07120
###### Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates
###### This particular implementation is by Andrich van Wyk • September 02, 2019
###### Used with permission: https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/
#########################################################################################################
class CosineAnnealer:

    def __init__(self, start, end, steps):
        self.start = start
        self.end = end
        self.steps = steps
        self.n = 0

    def step(self):
        self.n += 1
        cos = np.cos(np.pi * (self.n / self.steps)) + 1
        return self.end + (self.start - self.end) / 2. * cos


class OneCycleScheduler(Callback):
    """
    #########################################################################################################
    ###### One Cycle is a Super-Convergence technique developed by Leslie Smith: https://arxiv.org/abs/1708.07120
    ###### Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates
    ###### This particular implementation is by Andrich van Wyk • September 02, 2019
    ###### Credit: https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/
    #########################################################################################################
    Callback that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper (https://arxiv.org/pdf/1803.09820.pdf).
    If the model supports a momentum parameter, it will also be adapted by the schedule.
    The implementation adopts additional improvements from the fastai library (https://docs.fast.ai/callbacks.one_cycle.html), where
    only two phases are used and the adaptation is done using cosine annealing.
    """

    def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, phase_1_pct=0.3, div_factor=25.):
        super(OneCycleScheduler, self).__init__()
        lr_min = lr_max / div_factor
        final_lr = lr_max / (div_factor * 1e4)
        phase_1_steps = steps * phase_1_pct
        phase_2_steps = steps - phase_1_steps

        self.phase_1_steps = phase_1_steps
        self.phase_2_steps = phase_2_steps
        self.phase = 0
        self.step = 0

        # Phase 1 ramps the learning rate up while annealing momentum down;
        # phase 2 anneals the learning rate down while momentum recovers.
        self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(mom_max, mom_min, phase_1_steps)],
                       [CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]]

        self.lrs = []
        self.moms = []

    def on_train_begin(self, logs=None):
        self.phase = 0
        self.step = 0

        self.set_lr(self.lr_schedule().start)
        self.set_momentum(self.mom_schedule().start)

    def on_train_batch_begin(self, batch, logs=None):
        self.lrs.append(self.get_lr())
        self.moms.append(self.get_momentum())

    def on_train_batch_end(self, batch, logs=None):
        self.step += 1
        if self.step >= self.phase_1_steps:
            # Warm-up is over: switch to the annealing phase.
            self.phase = 1

        self.set_lr(self.lr_schedule().step())
        self.set_momentum(self.mom_schedule().step())

    def get_lr(self):
        try:
            return tf.keras.backend.get_value(self.model.optimizer.lr)
        except AttributeError:
            return None

    def get_momentum(self):
        try:
            return tf.keras.backend.get_value(self.model.optimizer.momentum)
        except AttributeError:
            return None

    def set_lr(self, lr):
        try:
            if lr < 0:
                # Guard against an invalid (negative) learning rate:
                # restart the cycle from its initial values instead.
                self.phase = 0
                self.step = 0
                lr = self.lr_schedule().start
                self.set_momentum(self.mom_schedule().start)
            tf.keras.backend.set_value(self.model.optimizer.lr, lr)
        except AttributeError:
            pass  # ignore models without a settable learning rate

    def set_momentum(self, mom):
        try:
            tf.keras.backend.set_value(self.model.optimizer.momentum, mom)
        except AttributeError:
            pass  # ignore optimizers without momentum

    def lr_schedule(self):
        return self.phases[self.phase][0]

    def mom_schedule(self):
        return self.phases[self.phase][1]

    def plot(self):
        ax = plt.subplot(1, 2, 1)
        ax.plot(self.lrs)
        ax.set_title('Learning Rate')
        ax = plt.subplot(1, 2, 2)
        ax.plot(self.moms)
        ax.set_title('Momentum')
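
A usage sketch for the callback above, not part of this diff: the model, toy data, and hyperparameter values are illustrative assumptions, and steps is set to the total number of training batches as the constructor expects. It assumes a TF 2.x version that exposes optimizer.lr, which the callback itself relies on.

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Hypothetical toy dataset and model -- any compiled Keras model will do.
X = np.random.rand(1024, 20).astype("float32")
y = np.random.randint(0, 2, size=(1024,)).astype("float32")

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(20,)),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer=tf.keras.optimizers.SGD(momentum=0.9),
              loss="binary_crossentropy")

epochs, batch_size = 10, 32
steps = int(np.ceil(len(X) / batch_size)) * epochs    # total batches across all epochs
one_cycle = OneCycleScheduler(lr_max=1e-2, steps=steps)

model.fit(X, y, epochs=epochs, batch_size=batch_size, callbacks=[one_cycle])
one_cycle.plot()    # scheduled learning-rate and momentum curves
plt.show()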