From 77b096c0c6bb940217a7043600999a92ff5c1fbc Mon Sep 17 00:00:00 2001
From: ragmani
Date: Mon, 13 Jan 2025 06:55:30 +0000
Subject: [PATCH 1/2] [onert/python] Introduce optimizer API

This commit introduces the optimizer API.
- adam.py : Adam optimizer class
- sgd.py : SGD optimizer class

ONE-DCO-1.0-Signed-off-by: ragmani
---
 .../experimental/train/optimizer/__init__.py  |  5 +++
 .../experimental/train/optimizer/adam.py      | 45 +++++++++++++++++++
 .../experimental/train/optimizer/optimizer.py | 24 ++++++++++
 .../experimental/train/optimizer/sgd.py       | 36 +++++++++++++++
 4 files changed, 110 insertions(+)
 create mode 100644 runtime/onert/api/python/package/experimental/train/optimizer/__init__.py
 create mode 100644 runtime/onert/api/python/package/experimental/train/optimizer/adam.py
 create mode 100644 runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
 create mode 100644 runtime/onert/api/python/package/experimental/train/optimizer/sgd.py

diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py b/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py
new file mode 100644
index 00000000000..ae450a7d0f4
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py
@@ -0,0 +1,5 @@
+from .sgd import SGD
+from .adam import Adam
+from onert.native.libnnfw_api_pybind import trainable_ops
+
+__all__ = ["SGD", "Adam", "trainable_ops"]
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/adam.py b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
new file mode 100644
index 00000000000..7d2d795a3b7
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
@@ -0,0 +1,45 @@
+from .optimizer import Optimizer
+
+
+class Adam(Optimizer):
+    """
+    Adam optimizer.
+    """
+    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
+        """
+        Initialize the Adam optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+            beta1 (float): Exponential decay rate for the first moment estimates.
+            beta2 (float): Exponential decay rate for the second moment estimates.
+            epsilon (float): Small constant to prevent division by zero.
+        """
+        super().__init__(learning_rate)
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.epsilon = epsilon
+        self.m = None
+        self.v = None
+        self.t = 0
+
+    def step(self, gradients, parameters):
+        """
+        Update parameters using Adam optimization.
+        Args:
+            gradients (list): List of gradients for each parameter.
+            parameters (list): List of parameters to be updated.
+        """
+        if self.m is None:
+            self.m = [0] * len(parameters)
+        if self.v is None:
+            self.v = [0] * len(parameters)
+
+        self.t += 1
+        for i, (grad, param) in enumerate(zip(gradients, parameters)):
+            self.m[i] = self.beta1 * self.m[i] + (1 - self.beta1) * grad
+            self.v[i] = self.beta2 * self.v[i] + (1 - self.beta2) * (grad**2)
+
+            m_hat = self.m[i] / (1 - self.beta1**self.t)
+            v_hat = self.v[i] / (1 - self.beta2**self.t)
+
+            param -= self.learning_rate * m_hat / (v_hat**0.5 + self.epsilon)
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
new file mode 100644
index 00000000000..12a46c6eaea
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
@@ -0,0 +1,24 @@
+from onert.native.libnnfw_api_pybind import trainable_ops
+
+
+class Optimizer:
+    """
+    Base class for optimizers. Subclasses should implement the `step` method.
+    """
+    def __init__(self, learning_rate=0.001, nums_trainable_ops=trainable_ops.ALL):
+        """
+        Initialize the optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+        """
+        self.learning_rate = learning_rate
+        self.nums_trainable_ops = nums_trainable_ops
+
+    def step(self, gradients, parameters):
+        """
+        Update parameters based on gradients. Should be implemented by subclasses.
+        Args:
+            gradients (list): List of gradients for each parameter.
+            parameters (list): List of parameters to be updated.
+        """
+        raise NotImplementedError("Subclasses must implement the `step` method.")
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
new file mode 100644
index 00000000000..15f91a79fb6
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
@@ -0,0 +1,36 @@
+from .optimizer import Optimizer
+
+
+class SGD(Optimizer):
+    """
+    Stochastic Gradient Descent (SGD) optimizer.
+    """
+    def __init__(self, learning_rate=0.001, momentum=0.0):
+        """
+        Initialize the SGD optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+            momentum (float): Momentum factor (default: 0.0).
+        """
+        super().__init__(learning_rate)
+
+        if momentum != 0.0:
+            raise NotImplementedError(
+                "Momentum is not supported in the current version of SGD.")
+        self.momentum = momentum
+        self.velocity = None
+
+    def step(self, gradients, parameters):
+        """
+        Update parameters using SGD with optional momentum.
+        Args:
+            gradients (list): List of gradients for each parameter.
+            parameters (list): List of parameters to be updated.
+        """
+        if self.velocity is None:
+            self.velocity = [0] * len(parameters)
+
+        for i, (grad, param) in enumerate(zip(gradients, parameters)):
+            self.velocity[
+                i] = self.momentum * self.velocity[i] - self.learning_rate * grad
+            parameters[i] += self.velocity[i]

From 69fef07c5ee951ffbb81c36a7f44254e98cd7dd1 Mon Sep 17 00:00:00 2001
From: ragmani
Date: Wed, 15 Jan 2025 03:56:45 +0000
Subject: [PATCH 2/2] Remove step function

---
 .../experimental/train/optimizer/adam.py      | 25 -------------------
 .../experimental/train/optimizer/optimizer.py |  9 -------
 .../experimental/train/optimizer/sgd.py       | 15 ---------------
 3 files changed, 49 deletions(-)

diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/adam.py b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
index 7d2d795a3b7..29de7d75517 100644
--- a/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
@@ -18,28 +18,3 @@ def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
         self.beta1 = beta1
         self.beta2 = beta2
         self.epsilon = epsilon
-        self.m = None
-        self.v = None
-        self.t = 0
-
-    def step(self, gradients, parameters):
-        """
-        Update parameters using Adam optimization.
-        Args:
-            gradients (list): List of gradients for each parameter.
-            parameters (list): List of parameters to be updated.
-        """
-        if self.m is None:
-            self.m = [0] * len(parameters)
-        if self.v is None:
-            self.v = [0] * len(parameters)
-
-        self.t += 1
-        for i, (grad, param) in enumerate(zip(gradients, parameters)):
-            self.m[i] = self.beta1 * self.m[i] + (1 - self.beta1) * grad
-            self.v[i] = self.beta2 * self.v[i] + (1 - self.beta2) * (grad**2)
-
-            m_hat = self.m[i] / (1 - self.beta1**self.t)
-            v_hat = self.v[i] / (1 - self.beta2**self.t)
-
-            param -= self.learning_rate * m_hat / (v_hat**0.5 + self.epsilon)
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
index 12a46c6eaea..5f47ddaf4bd 100644
--- a/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
@@ -13,12 +13,3 @@ def __init__(self, learning_rate=0.001, nums_trainable_ops=trainable_ops.ALL):
         """
         self.learning_rate = learning_rate
         self.nums_trainable_ops = nums_trainable_ops
-
-    def step(self, gradients, parameters):
-        """
-        Update parameters based on gradients. Should be implemented by subclasses.
-        Args:
-            gradients (list): List of gradients for each parameter.
-            parameters (list): List of parameters to be updated.
-        """
-        raise NotImplementedError("Subclasses must implement the `step` method.")
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
index 15f91a79fb6..6e08f927573 100644
--- a/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
@@ -19,18 +19,3 @@ def __init__(self, learning_rate=0.001, momentum=0.0):
                 "Momentum is not supported in the current version of SGD.")
         self.momentum = momentum
         self.velocity = None
-
-    def step(self, gradients, parameters):
-        """
-        Update parameters using SGD with optional momentum.
-        Args:
-            gradients (list): List of gradients for each parameter.
-            parameters (list): List of parameters to be updated.
-        """
-        if self.velocity is None:
-            self.velocity = [0] * len(parameters)
-
-        for i, (grad, param) in enumerate(zip(gradients, parameters)):
-            self.velocity[
-                i] = self.momentum * self.velocity[i] - self.learning_rate * grad
-            parameters[i] += self.velocity[i]