diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py b/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py
new file mode 100644
index 00000000000..ae450a7d0f4
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/__init__.py
@@ -0,0 +1,5 @@
+from .sgd import SGD
+from .adam import Adam
+from onert.native.libnnfw_api_pybind import trainable_ops
+
+__all__ = ["SGD", "Adam", "trainable_ops"]
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/adam.py b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
new file mode 100644
index 00000000000..29de7d75517
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/adam.py
@@ -0,0 +1,20 @@
+from .optimizer import Optimizer
+
+
+class Adam(Optimizer):
+    """
+    Adam optimizer.
+    """
+    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
+        """
+        Initialize the Adam optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+            beta1 (float): Exponential decay rate for the first moment estimates.
+            beta2 (float): Exponential decay rate for the second moment estimates.
+            epsilon (float): Small constant to prevent division by zero.
+        """
+        super().__init__(learning_rate)
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.epsilon = epsilon
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
new file mode 100644
index 00000000000..5f47ddaf4bd
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/optimizer.py
@@ -0,0 +1,15 @@
+from onert.native.libnnfw_api_pybind import trainable_ops
+
+
+class Optimizer:
+    """
+    Base class for optimizers. Subclasses should implement the `step` method.
+    """
+    def __init__(self, learning_rate=0.001, nums_trainable_ops=trainable_ops.ALL):
+        """
+        Initialize the optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+        """
+        self.learning_rate = learning_rate
+        self.nums_trainable_ops = nums_trainable_ops
diff --git a/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
new file mode 100644
index 00000000000..6e08f927573
--- /dev/null
+++ b/runtime/onert/api/python/package/experimental/train/optimizer/sgd.py
@@ -0,0 +1,21 @@
+from .optimizer import Optimizer
+
+
+class SGD(Optimizer):
+    """
+    Stochastic Gradient Descent (SGD) optimizer.
+    """
+    def __init__(self, learning_rate=0.001, momentum=0.0):
+        """
+        Initialize the SGD optimizer.
+        Args:
+            learning_rate (float): The learning rate for optimization.
+            momentum (float): Momentum factor (default: 0.0).
+        """
+        super().__init__(learning_rate)
+
+        if momentum != 0.0:
+            raise NotImplementedError(
+                "Momentum is not supported in the current version of SGD.")
+        self.momentum = momentum
+        self.velocity = None
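
A minimal usage sketch of the classes added above. It assumes the package is importable as onert.experimental.train.optimizer (inferred from the file paths and the onert.native import in __init__.py); how a training session actually consumes these optimizer objects is outside this diff.

    from onert.experimental.train.optimizer import SGD, Adam, trainable_ops

    # Plain SGD; any momentum other than 0.0 currently raises NotImplementedError.
    sgd = SGD(learning_rate=0.01)

    # Adam only stores its hyperparameters here; the update step itself is
    # expected to run in the native runtime.
    adam = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7)

    # Both inherit nums_trainable_ops from Optimizer, defaulting to trainable_ops.ALL.
    print(sgd.learning_rate, adam.beta1, adam.nums_trainable_ops == trainable_ops.ALL)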