diff --git a/Applications/Custom/LayerClient/jni/main.cpp b/Applications/Custom/LayerClient/jni/main.cpp index b655fce4c7..1790e59d11 100644 --- a/Applications/Custom/LayerClient/jni/main.cpp +++ b/Applications/Custom/LayerClient/jni/main.cpp @@ -20,6 +20,7 @@ /// @todo Migrate this to api #include +#include #include #include @@ -195,13 +196,15 @@ int main(int argc, char *argv[]) { } try { - auto &app_context = nntrainer::AppContext::Global(); + auto &ct_engine = nntrainer::Engine::Global(); + auto app_context = static_cast( + ct_engine.getRegisteredContext("cpu")); /// registering custom layer here /// registerFactory excepts a function that returns unique_ptr from /// std::vector ml::train::createLayer is a templated /// function for generic usage - app_context.registerFactory(nntrainer::createLayer); - app_context.registerFactory(nntrainer::createLayer); + app_context->registerFactory(nntrainer::createLayer); + app_context->registerFactory(nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register factory, reason: " << e.what() << std::endl; diff --git a/Applications/LLaMA/jni/main.cpp b/Applications/LLaMA/jni/main.cpp index 32018d4098..bb27e469cc 100644 --- a/Applications/LLaMA/jni/main.cpp +++ b/Applications/LLaMA/jni/main.cpp @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -723,10 +724,11 @@ int main(int argc, char *argv[]) { #else std::string text = "This is smaple input for LLaMA."; #endif - - auto &app_context = nntrainer::AppContext::Global(); + auto &ct_engine = nntrainer::Engine::Global(); + auto app_context = + static_cast(ct_engine.getRegisteredContext("cpu")); try { - app_context.registerFactory(nntrainer::createLayer); + app_context->registerFactory(nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register factory, reason: " << e.what() << std::endl; @@ -734,7 +736,7 @@ int main(int argc, char *argv[]) { } try { - 
app_context.registerFactory(nntrainer::createLayer); + app_context->registerFactory(nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register factory, reason: " << e.what() << std::endl; diff --git a/Applications/SimpleShot/task_runner.cpp b/Applications/SimpleShot/task_runner.cpp index 15e2e0ab69..00ddbd86cb 100644 --- a/Applications/SimpleShot/task_runner.cpp +++ b/Applications/SimpleShot/task_runner.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -183,7 +184,9 @@ std::unique_ptr createModel(const std::string &backbone, * @return int */ int main(int argc, char **argv) { - auto &app_context = nntrainer::AppContext::Global(); + auto &ct_engine = nntrainer::Engine::Global(); + auto app_context = + static_cast(ct_engine.getRegisteredContext("cpu")); if (argc != 6 && argc != 5) { std::cout @@ -221,7 +224,7 @@ int main(int argc, char **argv) { std::string val_path = app_path + "/tasks/" + argv[4]; try { - app_context.registerFactory( + app_context->registerFactory( nntrainer::createLayer); } catch (std::exception &e) { std::cerr << "registering factory failed: " << e.what(); diff --git a/Applications/TransferLearning/CIFAR_Classification/jni/main.cpp b/Applications/TransferLearning/CIFAR_Classification/jni/main.cpp index 1d4d577910..8df417f66b 100644 --- a/Applications/TransferLearning/CIFAR_Classification/jni/main.cpp +++ b/Applications/TransferLearning/CIFAR_Classification/jni/main.cpp @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include @@ -375,7 +375,7 @@ int main(int argc, char *argv[]) { /// @todo add api version of this try { - nntrainer::AppContext::Global().setWorkingDirectory(data_path); + nntrainer::Engine::Global().setWorkingDirectory(data_path); } catch (std::invalid_argument &e) { std::cerr << "setting data_path failed, pwd is used instead"; } diff --git a/Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp 
b/Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp index 1046ee32e3..a15269a332 100644 --- a/Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp +++ b/Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp @@ -39,7 +39,7 @@ #include -#include +#include #define TRAINING true /** @@ -254,7 +254,7 @@ int main(int argc, char *argv[]) { /// @todo add api version of this try { - nntrainer::AppContext::Global().setWorkingDirectory(data_path); + nntrainer::Engine::Global().setWorkingDirectory(data_path); } catch (std::invalid_argument &e) { std::cerr << "setting data_path failed, pwd is used instead"; } diff --git a/Applications/YOLOv2/jni/main.cpp b/Applications/YOLOv2/jni/main.cpp index 4aeefab16d..dfd89d9300 100644 --- a/Applications/YOLOv2/jni/main.cpp +++ b/Applications/YOLOv2/jni/main.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -285,9 +286,13 @@ int main(int argc, char *argv[]) { << std::endl; try { - auto &app_context = nntrainer::AppContext::Global(); - app_context.registerFactory(nntrainer::createLayer); - app_context.registerFactory( + auto &ct_engine = nntrainer::Engine::Global(); + + auto app_context = static_cast( + ct_engine.getRegisteredContext("cpu")); + + app_context->registerFactory(nntrainer::createLayer); + app_context->registerFactory( nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register factory, reason: " << e.what() diff --git a/Applications/YOLOv3/jni/main.cpp b/Applications/YOLOv3/jni/main.cpp index 783c59d226..c53e5cf452 100644 --- a/Applications/YOLOv3/jni/main.cpp +++ b/Applications/YOLOv3/jni/main.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -402,8 +403,11 @@ int main(int argc, char *argv[]) { << std::endl; try { - auto &app_context = nntrainer::AppContext::Global(); - app_context.registerFactory(nntrainer::createLayer); + auto &ct_engine = nntrainer::Engine::Global(); + auto 
app_context = static_cast( + ct_engine.getRegisteredContext("cpu")); + + app_context->registerFactory(nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register factory, reason: " << e.what() << std::endl; @@ -411,8 +415,10 @@ int main(int argc, char *argv[]) { } try { - auto &app_context = nntrainer::AppContext::Global(); - app_context.registerFactory( + auto &ct_engine = nntrainer::Engine::Global(); + auto app_context = static_cast( + ct_engine.getRegisteredContext("cpu")); + app_context->registerFactory( nntrainer::createLayer); } catch (std::invalid_argument &e) { std::cerr << "failed to register yolov3 loss, reason: " << e.what() diff --git a/api/ccapi/include/common.h b/api/ccapi/include/common.h index f1ea0101e0..d1ce4cf25b 100644 --- a/api/ccapi/include/common.h +++ b/api/ccapi/include/common.h @@ -43,6 +43,15 @@ enum class ExecutionMode { VALIDATE /** Validate mode, label is necessary */ }; +/** + * @brief Enumeration of layer compute engine + */ +enum LayerComputeEngine { + CPU, /**< CPU as the compute engine */ + GPU, /**< GPU as the compute engine */ + QNN, /**< QNN as the compute engine */ +}; + /** * @brief Get the version of NNTrainer */ diff --git a/api/ccapi/include/layer.h b/api/ccapi/include/layer.h index e1a6885c7c..b1d267b79b 100644 --- a/api/ccapi/include/layer.h +++ b/api/ccapi/include/layer.h @@ -114,14 +114,6 @@ enum LayerType { LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN /**< Unknown */ }; -/** - * @brief Enumeration of layer compute engine - */ -enum LayerComputeEngine { - CPU, /**< CPU as the compute engine */ - GPU, /**< GPU as the compute engine */ -}; - /** * @class Layer Base class for layers * @brief Base class for all layers @@ -261,16 +253,14 @@ class Layer { */ std::unique_ptr createLayer(const LayerType &type, - const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU); + const std::vector &properties = {}); /** * @brief Factory creator with 
constructor for layer */ std::unique_ptr createLayer(const std::string &type, - const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU); + const std::vector &properties = {}); /** * @brief General Layer Factory function to register Layer @@ -343,37 +333,35 @@ DivideLayer(const std::vector &properties = {}) { /** * @brief Helper function to create fully connected layer */ -inline std::unique_ptr FullyConnected( - const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return createLayer(LayerType::LAYER_FC, properties, compute_engine); +inline std::unique_ptr +FullyConnected(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_FC, properties); } /** * @brief Helper function to create Swiglu layer */ inline std::unique_ptr -Swiglu(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return createLayer(LayerType::LAYER_SWIGLU, properties, compute_engine); +Swiglu(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_SWIGLU, properties); } -/** - * @brief Helper function to create RMS normalization layer for GPU - */ -inline std::unique_ptr -RMSNormCl(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::GPU) { - return createLayer(LayerType::LAYER_RMSNORM, properties, compute_engine); -} +// /** +// * @brief Helper function to create RMS normalization layer for GPU +// */ +// inline std::unique_ptr +// RMSNormCl(const std::vector &properties = {}, +// const LayerComputeEngine &compute_engine = LayerComputeEngine::GPU) +// { +// return createLayer(LayerType::LAYER_RMSNORM, properties, compute_engine); +// } /** * @brief Helper function to create Transpose layer */ inline std::unique_ptr -Transpose(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return 
createLayer(LayerType::LAYER_TRANSPOSE, properties, compute_engine); +Transpose(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_TRANSPOSE, properties); } /** @@ -428,27 +416,24 @@ Flatten(const std::vector &properties = {}) { * @brief Helper function to create reshape layer */ inline std::unique_ptr -Reshape(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return createLayer(LayerType::LAYER_RESHAPE, properties, compute_engine); +Reshape(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_RESHAPE, properties); } /** * @brief Helper function to create addition layer */ inline std::unique_ptr -Addition(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return createLayer(LayerType::LAYER_ADDITION, properties, compute_engine); +Addition(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_ADDITION, properties); } /** * @brief Helper function to create concat layer */ inline std::unique_ptr -Concat(const std::vector &properties = {}, - const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) { - return createLayer(LayerType::LAYER_CONCAT, properties, compute_engine); +Concat(const std::vector &properties = {}) { + return createLayer(LayerType::LAYER_CONCAT, properties); } /** diff --git a/api/ccapi/src/factory.cpp b/api/ccapi/src/factory.cpp index 5f2b2dd2b9..75c8f09134 100644 --- a/api/ccapi/src/factory.cpp +++ b/api/ccapi/src/factory.cpp @@ -15,9 +15,9 @@ #include #include -#include #include #include +#include #include #include #include @@ -31,18 +31,16 @@ namespace ml { namespace train { std::unique_ptr createLayer(const LayerType &type, - const std::vector &properties, - const LayerComputeEngine &compute_engine) { - return nntrainer::createLayerNode(type, properties, compute_engine); + const std::vector &properties) { + return nntrainer::createLayerNode(type, 
properties); } /** * @brief Factory creator with constructor for layer */ std::unique_ptr createLayer(const std::string &type, - const std::vector &properties, - const LayerComputeEngine &compute_engine) { - return nntrainer::createLayerNode(type, properties, compute_engine); + const std::vector &properties) { + return nntrainer::createLayerNode(type, properties); } std::unique_ptr @@ -129,8 +127,8 @@ createDataset(DatasetType type, datagen_cb cb, void *user_data, std::unique_ptr createLearningRateScheduler(const LearningRateSchedulerType &type, const std::vector &properties) { - auto &ac = nntrainer::AppContext::Global(); - return ac.createObject(type, properties); + auto &eg = nntrainer::Engine::Global(); + return eg.createLearningRateSchedulerObject(type, properties); } /** @@ -139,8 +137,8 @@ createLearningRateScheduler(const LearningRateSchedulerType &type, std::unique_ptr createLearningRateScheduler(const std::string &type, const std::vector &properties) { - auto &ac = nntrainer::AppContext::Global(); - return ac.createObject(type, properties); + auto &eg = nntrainer::Engine::Global(); + return eg.createLearningRateSchedulerObject(type, properties); } std::string getVersion() { diff --git a/debian/nntrainer-dev.install b/debian/nntrainer-dev.install index 73459d8097..1a7f41f8ae 100644 --- a/debian/nntrainer-dev.install +++ b/debian/nntrainer-dev.install @@ -31,6 +31,8 @@ /usr/include/nntrainer/loss_layer.h # custom layer kits /usr/include/nntrainer/app_context.h +/usr/include/nntrainer/context.h +/usr/include/nntrainer/engine.h # logger /usr/include/nntrainer/nntrainer_log.h /usr/include/nntrainer/nntrainer_logger.h diff --git a/nntrainer/app_context.cpp b/nntrainer/app_context.cpp index 61890fc9d8..f16ba508b6 100644 --- a/nntrainer/app_context.cpp +++ b/nntrainer/app_context.cpp @@ -409,12 +409,8 @@ static void registerer(AppContext &ac) noexcept { }; AppContext &AppContext::Global() { - static AppContext instance; - /// in g++ there is a bug that hangs up 
if caller throws, - /// so registerer is noexcept although it'd better not - /// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70298 - std::call_once(global_app_context_init_flag, registerer, std::ref(instance)); - return instance; + registerer(*this); + return *this; } void AppContext::setWorkingDirectory(const std::string &base) { diff --git a/nntrainer/app_context.h b/nntrainer/app_context.h index e0ed2ea0be..a5b19c25e4 100644 --- a/nntrainer/app_context.h +++ b/nntrainer/app_context.h @@ -31,6 +31,7 @@ #include #include +#include #include namespace nntrainer { @@ -42,37 +43,18 @@ namespace {} // namespace * @class AppContext contains user-dependent configuration * @brief App */ -class AppContext { +class AppContext : public Context { public: - using PropsType = std::vector; - - template using PtrType = std::unique_ptr; - - template - using FactoryType = std::function(const PropsType &)>; - - template - using PtrFactoryType = PtrType (*)(const PropsType &); - template - using StrIndexType = std::unordered_map>; - - /** integer to string key */ - using IntIndexType = std::unordered_map; /** - * This type contains tuple of - * 1) integer -> string index - * 2) string -> factory index + * @brief Default constructor */ - template - using IndexType = std::tuple, IntIndexType>; - - template using FactoryMap = std::tuple...>; + AppContext() = default; /** - * @brief Default constructor + * @brief Default destructor */ - AppContext() = default; + ~AppContext() override = default; /** * @@ -80,7 +62,7 @@ class AppContext { * * @return AppContext& */ - static AppContext &Global(); + AppContext &Global(); /** * @brief Set Working Directory for a relative path. 
working directory is set @@ -201,6 +183,44 @@ class AppContext { const std::string &key = "", const int int_key = -1); + std::unique_ptr + createLayerObject(const std::string &type, + const std::vector &properties = {}) override { + return createObject(type, properties); + } + + std::unique_ptr createOptimizerObject( + const std::string &type, + const std::vector &properties = {}) override { + return createObject(type, properties); + } + + std::unique_ptr + createLearningRateSchedulerObject( + const std::string &type, + const std::vector &properties = {}) override { + return createObject(type, properties); + } + + std::unique_ptr + createLayerObject(const int int_key, + const std::vector &properties = {}) override { + return createObject(int_key, properties); + } + + std::unique_ptr createOptimizerObject( + const int int_key, + const std::vector &properties = {}) override { + return createObject(int_key, properties); + } + + std::unique_ptr + createLearningRateSchedulerObject( + const int int_key, + const std::vector &properties = {}) override { + return createObject(int_key, properties); + } + /** * @brief Create an Object from the integer key * @@ -271,6 +291,8 @@ class AppContext { throw std::invalid_argument("cannot create unknown object"); } + std::string getName() override { return "cpu"; } + private: FactoryMap diff --git a/nntrainer/compiler/ini_interpreter.cpp b/nntrainer/compiler/ini_interpreter.cpp index 146e62ed1e..ca43111f1b 100644 --- a/nntrainer/compiler/ini_interpreter.cpp +++ b/nntrainer/compiler/ini_interpreter.cpp @@ -47,9 +47,9 @@ static constexpr const char *LRSCHED_STR = "LearningRateScheduler"; namespace nntrainer { IniGraphInterpreter::IniGraphInterpreter( - const AppContext &app_context_, + const Engine &ct_eg_, std::function pathResolver_) : - app_context(app_context_), pathResolver(pathResolver_) {} + ct_engine(ct_eg_), pathResolver(pathResolver_) {} IniGraphInterpreter::~IniGraphInterpreter() {} @@ -134,15 +134,15 @@ std::vector 
section2properties( */ template std::shared_ptr -section2layer(dictionary *ini, const std::string &sec_name, - const AppContext &ac, const std::string &backbone_file, +section2layer(dictionary *ini, const std::string &sec_name, const Engine &eg, + const std::string &backbone_file, std::function &pathResolver) { throw std::invalid_argument("supported only with a tag for now"); } template <> std::shared_ptr section2layer( - dictionary *ini, const std::string &sec_name, const AppContext &ac, + dictionary *ini, const std::string &sec_name, const Engine &eg, const std::string &backbone_file, std::function &pathResolver) { @@ -153,13 +153,14 @@ std::shared_ptr section2layer( auto properties = section2properties(ini, sec_name, pathResolver); - auto layer = createLayerNode(ac.createObject(layer_type), properties); + auto layer = + createLayerNode(eg.createLayerObject(layer_type, properties), properties); return layer; } template <> std::shared_ptr section2layer( - dictionary *ini, const std::string &sec_name, const AppContext &ac, + dictionary *ini, const std::string &sec_name, const Engine &eg, const std::string &backbone_file, std::function &pathResolver) { std::string type; @@ -348,11 +349,11 @@ GraphRepresentation IniGraphInterpreter::deserialize(const std::string &in) { } if (std::strcmp(backbone_path, UNKNOWN_STR) == 0) { - layer = section2layer(ini, sec_name, app_context, "", - pathResolver); + layer = + section2layer(ini, sec_name, ct_engine, "", pathResolver); } else { - layer = section2layer(ini, sec_name, app_context, - backbone, pathResolver); + layer = section2layer(ini, sec_name, ct_engine, backbone, + pathResolver); } graph.push_back(layer); diff --git a/nntrainer/compiler/ini_interpreter.h b/nntrainer/compiler/ini_interpreter.h index 457a032bf4..5ad6d11b37 100644 --- a/nntrainer/compiler/ini_interpreter.h +++ b/nntrainer/compiler/ini_interpreter.h @@ -18,7 +18,7 @@ #include -#include +#include #include namespace nntrainer { @@ -35,7 +35,7 @@ class 
IniGraphInterpreter : public GraphInterpreter { * @param pathResolver_ path resolver function to be used */ IniGraphInterpreter( - const AppContext &app_context_ = AppContext::Global(), + const Engine &ct_engine_ = Engine::Global(), std::function pathResolver_ = [](const std::string &path) { return path; }); @@ -58,7 +58,7 @@ class IniGraphInterpreter : public GraphInterpreter { GraphRepresentation deserialize(const std::string &in) override; private: - AppContext app_context; + Engine ct_engine; std::function pathResolver; }; diff --git a/nntrainer/compiler/tflite_interpreter.h b/nntrainer/compiler/tflite_interpreter.h index d54e1f1c76..df4b98f902 100644 --- a/nntrainer/compiler/tflite_interpreter.h +++ b/nntrainer/compiler/tflite_interpreter.h @@ -14,7 +14,7 @@ #include -#include +#include namespace nntrainer { /** @@ -28,8 +28,8 @@ class TfliteInterpreter : public GraphInterpreter { * * @param app_context_ app context to create layers */ - TfliteInterpreter(const AppContext &app_context_ = AppContext::Global()) : - app_context(app_context_) {} + TfliteInterpreter(const Engine &ct_engine_ = Engine::Global()) : + ct_engine(ct_engine_) {} /** * @brief Destroy the Tflite Interpreter object @@ -49,7 +49,7 @@ class TfliteInterpreter : public GraphInterpreter { GraphRepresentation deserialize(const std::string &in) override; private: - AppContext app_context; + Engine ct_engine; }; } // namespace nntrainer diff --git a/nntrainer/context.h b/nntrainer/context.h new file mode 100644 index 0000000000..9be568f8f1 --- /dev/null +++ b/nntrainer/context.h @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 Jijoong Moon + * + * @file context.h + * @date 10 Dec 2024 + * @see https://github.com/nnstreamer/nntrainer + * @author Jijoong Moon + * @bug No known bugs except for NYI items + * @brief This file contains app context related functions and classes that + * manages the global configuration of the current environment. 
+ */ + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +namespace nntrainer { + +/** + * @class Context contains user-dependent configuration for support + * @brief support for app context + */ + +class Context { +public: + using PropsType = std::vector; + + template using PtrType = std::unique_ptr; + + template + using FactoryType = std::function(const PropsType &)>; + + template + using PtrFactoryType = PtrType (*)(const PropsType &); + + template + using StrIndexType = std::unordered_map>; + + /** integer to string key */ + using IntIndexType = std::unordered_map; + + /** + * This type contains tuple of + * 1) integer -> string index + * 2) string -> factory index + */ + template + using IndexType = std::tuple, IntIndexType>; + + template using FactoryMap = std::tuple...>; + + /** + * @brief Default constructor + */ + Context() = default; + + /** + * @brief Destructor + */ + virtual ~Context() = default; + + /** + * + * @brief Get Global qnn context. + * + * @return Context& + */ + virtual Context &Global() = 0; + + /** + * + * @brief Initialization of Context. 
+ * + * @return status & + */ + virtual int init() { return 0; }; + + /** + * @brief Create an Layer Object from the type (string) + * + * @param type type of layer + * @param props property + * @return PtrType unique pointer to the object + */ + virtual PtrType + createLayerObject(const std::string &type, + const std::vector &props = {}) { + return nullptr; + }; + + /** + * @brief Create an Layer Object from the integer key + * + * @param int_key integer key + * @param props property + * @return PtrType unique pointer to the object + */ + virtual PtrType + createLayerObject(const int int_key, + const std::vector &props = {}) { + return nullptr; + }; + + /** + * @brief Create an Optimizer Object from the type (stirng) + * + * @param type type of optimizer + * @param props property + * @return PtrType unique pointer to the object + */ + virtual PtrType + createOptimizerObject(const std::string &type, + const std::vector &props = {}) { + return nullptr; + }; + + /** + * @brief Create an Layer Object from the integer key + * + * @param int_key integer key + * @param props property + * @return PtrType unique pointer to the object + */ + virtual PtrType + createOptimizerObject(const int int_key, + const std::vector &properties = {}) { + return nullptr; + }; + + /** + * @brief Create an LearningRateScheduler Object from the type (stirng) + * + * @param type type of optimizer + * @param props property + * @return PtrType unique pointer to the + * object + */ + virtual PtrType + createLearningRateSchedulerObject( + const std::string &type, const std::vector &propeties = {}) { + return nullptr; + } + + /** + * @brief Create an LearningRateScheduler Object from the integer key + * + * @param int_key integer key + * @param props property + * @return PtrType unique pointer to the + * object + */ + virtual std::unique_ptr + createLearningRateSchedulerObject( + const int int_key, const std::vector &propeties = {}) { + return nullptr; + } + + /** + * @brief getter of context name 
+ * + * @return string name of the context + */ + virtual std::string getName() = 0; + +private: + /** + * @brief map of context + */ + static inline std::unordered_map ContextMap; +}; + +using CreateContextFunc = nntrainer::Context *(*)(); +using DestroyContextFunc = void (*)(nntrainer::Context *); + +/** + * @brief Context Pluggable struct that enables pluggable layer + * + */ +typedef struct { + CreateContextFunc createfunc; /**< create layer function */ + DestroyContextFunc destroyfunc; /**< destory function */ +} ContextPluggable; + +/** + * @brief pluggable Context must have this structure defined + */ +extern "C" ContextPluggable ml_train_context_pluggable; + +} // namespace nntrainer + +#endif /* __CONTEXT_H__ */ diff --git a/nntrainer/engine.cpp b/nntrainer/engine.cpp new file mode 100644 index 0000000000..f7e2c77a46 --- /dev/null +++ b/nntrainer/engine.cpp @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 Jijoong Moon + * + * @file engine.cpp + * @date 27 December 2024 + * @brief This file contains engine context related functions and classes that + * manages the engines (NPU, GPU, CPU) of the current environment + * @see https://github.com/nnstreamer/nntrainer + * @author Jijoong Moon + * @bug No known bugs except for NYI items + * + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef ENABLE_OPENCL +#include +#endif + +static std::string solib_suffix = ".so"; +static std::string contextlib_suffix = "context.so"; +static const std::string func_tag = "[Engine] "; + +namespace nntrainer { + +std::mutex engine_mutex; + +std::once_flag global_engine_init_flag; + +void Engine::add_default_object(Engine &eg) { + /// @note all layers should be added to the app_context to guarantee that + /// createLayer/createOptimizer class is created + + nntrainer::AppContext *app_context = new nntrainer::AppContext(); + app_context->Global(); + + eg.registerContext("cpu", 
app_context); + + // #ifdef ENALBE_OPENCL + // eg.registererContext("gpu", + // nntrainer::ClContext(nntrainer::ClContext::Global())); + // #endif +} + +void Engine::registerer(Engine &eg) noexcept { + try { + add_default_object(eg); + } catch (std::exception &e) { + ml_loge("registering layers failed!!, reason: %s", e.what()); + } catch (...) { + ml_loge("registering layer failed due to unknown reason"); + } +}; + +Engine &Engine::Global() { + static Engine instance; + /// in g++ there is a bug that hangs up if caller throws, + /// so registerer is noexcept although it'd better not + /// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70298 + std::call_once(global_engine_init_flag, registerer, std::ref(instance)); + return instance; +} + +std::string +Engine::parseComputeEngine(const std::vector &props) const { + for (auto &prop : props) { + std::string key, value; + int status = nntrainer::getKeyValue(prop, key, value); + if (nntrainer::istrequal(key, "engine")) { + constexpr const auto data = + std::data(props::ComputeEngineTypeInfo::EnumList); + for (uint i = 0; i < props::ComputeEngineTypeInfo::EnumList.size(); ++i) { + if (nntrainer::istrequal(value.c_str(), + props::ComputeEngineTypeInfo::EnumStr[i])) { + return props::ComputeEngineTypeInfo::EnumStr[i]; + } + } + } + } + + return "cpu"; +} + +/** + * @brief Get the Full Path from given string + * @details path is resolved in the following order + * 1) if @a path is absolute, return path + * ---------------------------------------- + * 2) if @a base == "" && @a path == "", return "." 
+ * 3) if @a base == "" && @a path != "", return @a path + * 4) if @a base != "" && @a path == "", return @a base + * 5) if @a base != "" && @a path != "", return @a base + "/" + path + * + * @param path path to calculate from base + * @param base base path + * @return const std::string + */ +const std::string getFullPath(const std::string &path, + const std::string &base) { + /// if path is absolute, return path + if (path[0] == '/') { + return path; + } + + if (base == std::string()) { + return path == std::string() ? "." : path; + } + + return path == std::string() ? base : base + "/" + path; +} + +const std::string Engine::getWorkingPath(const std::string &path) { + return getFullPath(path, working_path_base); +} + +void Engine::setWorkingDirectory(const std::string &base) { + DIR *dir = opendir(base.c_str()); + + if (!dir) { + std::stringstream ss; + ss << func_tag << "path is not directory or has no permission: " << base; + throw std::invalid_argument(ss.str().c_str()); + } + closedir(dir); + + char *ret = getRealpath(base.c_str(), nullptr); + + if (ret == nullptr) { + std::stringstream ss; + ss << func_tag << "failed to get canonical path for the path: "; + throw std::invalid_argument(ss.str().c_str()); + } + + working_path_base = std::string(ret); + ml_logd("working path base has set: %s", working_path_base.c_str()); + free(ret); +} + +int Engine::registerContext(const std::string &library_path, + const std::string &base_path) { + const std::string full_path = getFullPath(library_path, base_path); + + void *handle = dlopen(full_path.c_str(), RTLD_LAZY | RTLD_LOCAL); + const char *error_msg = dlerror(); + + NNTR_THROW_IF(handle == nullptr, std::invalid_argument) + << func_tag << "open plugin failed, reason: " << error_msg; + + nntrainer::ContextPluggable *pluggable = + reinterpret_cast( + dlsym(handle, "ml_train_context_pluggable")); + + error_msg = dlerror(); + auto close_dl = [handle] { dlclose(handle); }; + NNTR_THROW_IF_CLEANUP(error_msg != nullptr || 
pluggable == nullptr, + std::invalid_argument, close_dl) + << func_tag << "loading symbol failed, reason: " << error_msg; + + auto context = pluggable->createfunc(); + NNTR_THROW_IF_CLEANUP(context == nullptr, std::invalid_argument, close_dl) + << func_tag << "created pluggable context is null"; + auto type = context->getName(); + NNTR_THROW_IF_CLEANUP(type == "", std::invalid_argument, close_dl) + << func_tag << "custom layer must specify type name, but it is empty"; + + registerContext(type, context); + + return 0; +} + +} // namespace nntrainer diff --git a/nntrainer/engine.h b/nntrainer/engine.h new file mode 100644 index 0000000000..357be9d73c --- /dev/null +++ b/nntrainer/engine.h @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: Apache-2.0 +/** + * Copyright (C) 2024 Jijoong Moon + * + * @file engine.h + * @date 27 December 2024 + * @brief This file contains engine context related functions and classes that + * manages the engines (NPU, GPU, CPU) of the current environment + * @see https://github.com/nnstreamer/nntrainer + * @author Jijoong Moon + * @bug No known bugs except for NYI items + * + */ + +#ifndef __ENGINE_H__ +#define __ENGINE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace nntrainer { + +extern std::mutex engine_mutex; +namespace {} // namespace + +/** + * @class Engine contains user-dependent configuration + * @brief App + */ +class Engine { +protected: + static void registerer(Engine &eg) noexcept; + + static void add_default_object(Engine &eg); + + static void registerContext(std::string name, nntrainer::Context *context) { + const std::lock_guard lock(engine_mutex); + + std::transform(name.begin(), name.end(), name.begin(), + [](unsigned char c) { return std::tolower(c); }); + + if (engines.find(name) != engines.end()) { + std::stringstream ss; + ss << "Cannot register Context with name : " << name; + throw std::invalid_argument(ss.str().c_str()); + } 
+ engines.insert(std::make_pair(name, context)); + } + +public: + /** + * @brief Default constructor + */ + Engine() = default; + + /** + * @brief Default Destructor + */ + ~Engine() = default; + + /** + * @brief register a Context from a shared library + * plugin must have **extern "C" LayerPluggable *ml_train_context_pluggable** + * defined else error + * + * @param library_path a file name of the library + * @param base_path base path to make a full path (optional) + * @throws std::invalid_argument if library_path is invalid or library is + * invalid + */ + int registerContext(const std::string &library_path, + const std::string &base_path = ""); + + /** + * @brief get a registered Context + * + * @param name Registered Context Name + * @throws std::invalid_argument if no context with name + * @return Context Pointer : for register Object factory, casting might be + * needed. + */ + static nntrainer::Context *getRegisteredContext(std::string name) { + + std::transform(name.begin(), name.end(), name.begin(), + [](unsigned char c) { return std::tolower(c); }); + + if (engines.find(name) == engines.end()) { + throw std::invalid_argument("not registered"); + } + return engines.at(name); + } + + /** + * + * @brief Get Global Engine which is Static. 
+ * + * @return Engine& + */ + static Engine &Global(); + + /** + * + * @brief Parse compute Engine keywords in properties : eg) engine = cpu + * default is "cpu" + * @return Context name + */ + std::string parseComputeEngine(const std::vector &props) const; + + /** + * @brief Create a Layer Object with Layer name + * + * @param type layer name + * @param props property + * @return unique_ptr unique pointer to the Layer object + */ + std::unique_ptr + createLayerObject(const std::string &type, + const std::vector &properties = {}) const { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + ct->getName(); + return ct->createLayerObject(type); + } + + /** + * @brief Create a Layer Object with Layer key + * + * @param int_key key + * @param props property + * @return unique_ptr unique pointer to the Layer object + */ + std::unique_ptr + createLayerObject(const int int_key, + const std::vector &properties = {}) const { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + return ct->createLayerObject(int_key); + } + + /** + * @brief Create an Optimizer Object with Optimizer name + * + * @param type Optimizer name + * @param props property + * @return unique_ptr unique pointer to the Optimizer object + */ + std::unique_ptr + createOptimizerObject(const std::string &type, + const std::vector &properties = {}) const { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + return ct->createOptimizerObject(type); + } + + /** + * @brief Create an Optimizer Object with Optimizer key + * + * @param int_key key + * @param props property + * @return unique_ptr unique pointer to the Optimizer object + */ + std::unique_ptr + createOptimizerObject(const int int_key, + const std::vector &properties = {}) const { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + return ct->createOptimizerObject(int_key); + } + + /** + * @brief Create a LearningRateScheduler Object with type + * + * @param type type of 
LearningRateScheduler + * @param props property + * @return unique_ptr unique pointer to the LearningRateScheduler object + */ + std::unique_ptr + createLearningRateSchedulerObject( + const std::string &type, + const std::vector &properties = {}) const { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + return ct->createLearningRateSchedulerObject(type, properties); + } + + /** + * @brief Create a LearningRateScheduler Object with key + * + * @param int_key key + * @param props property + * @return unique_ptr unique pointer to the LearningRateScheduler object + */ + std::unique_ptr + createLearningRateSchedulerObject( + const int int_key, const std::vector &properties = {}) { + auto ct = getRegisteredContext(parseComputeEngine(properties)); + return ct->createLearningRateSchedulerObject(int_key, properties); + } + + /** + * @brief Get Working Path from a relative or representation of a path + * starting from @a working_path_base. + * @param[in] path to make full path + * @return If absolute path is given, returns @a path + * If relative path is given and working_path_base is not set, return + * relative path. + * If relative path is given and working_path_base has set, return absolute + * path from current working directory + */ + const std::string getWorkingPath(const std::string &path = ""); + + /** + * @brief Set Working Directory for a relative path. 
working directory is set + canonically + * @param[in] base base directory + * @throw std::invalid_argument if path is not valid for current system + */ + void setWorkingDirectory(const std::string &base); + + /** + * @brief unset working directory + * + */ + void unsetWorkingDirectory() { working_path_base = ""; } + + /** + * @brief query if the engine has a working directory set + * + * @retval true working path base is set + * @retval false working path base is not set + */ + bool hasWorkingDirectory() { return !working_path_base.empty(); } + +private: + /** + * @brief map for Context and Context name + * + */ + static inline std::unordered_map engines; + + std::string working_path_base; +}; + +namespace plugin {} + +} // namespace nntrainer + +#endif /* __ENGINE_H__ */ diff --git a/nntrainer/layers/common_properties.h b/nntrainer/layers/common_properties.h index d73cd2f0fa..e89e270f56 100644 --- a/nntrainer/layers/common_properties.h +++ b/nntrainer/layers/common_properties.h @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -945,7 +946,8 @@ struct ActivationTypeInfo { * @brief Activation Enumeration Information * */ -class Activation final : public EnumProperty { +class Activation final + : public EnumProperty { public: using prop_tag = enum_class_prop_tag; static constexpr const char *key = "activation"; diff --git a/nntrainer/layers/layer_node.cpp b/nntrainer/layers/layer_node.cpp index 9c6c290703..b071af4cde 100644 --- a/nntrainer/layers/layer_node.cpp +++ b/nntrainer/layers/layer_node.cpp @@ -19,11 +19,12 @@ #include #include -#include #include #include #include #include +#include +#include #include #include #include @@ -135,17 +136,9 @@ LayerNode::~LayerNode() = default; */ std::unique_ptr createLayerNode(const ml::train::LayerType &type, - const std::vector &properties, - const ml::train::LayerComputeEngine &compute_engine) { -#ifdef ENABLE_OPENCL - if (compute_engine == ml::train::LayerComputeEngine::GPU) { - auto &cc = 
nntrainer::ClContext::Global(); - return createLayerNode(cc.createObject(type), properties, - compute_engine); - } -#endif - auto &ac = nntrainer::AppContext::Global(); - return createLayerNode(ac.createObject(type), properties); + const std::vector &properties) { + auto &eg = nntrainer::Engine::Global(); + return createLayerNode(eg.createLayerObject(type, properties), properties); } /** @@ -153,17 +146,9 @@ createLayerNode(const ml::train::LayerType &type, */ std::unique_ptr createLayerNode(const std::string &type, - const std::vector &properties, - const ml::train::LayerComputeEngine &compute_engine) { -#ifdef ENABLE_OPENCL - if (compute_engine == ml::train::LayerComputeEngine::GPU) { - auto &cc = nntrainer::ClContext::Global(); - return createLayerNode(cc.createObject(type), properties, - compute_engine); - } -#endif - auto &ac = nntrainer::AppContext::Global(); - return createLayerNode(ac.createObject(type), properties); + const std::vector &properties) { + auto &eg = nntrainer::Engine::Global(); + return createLayerNode(eg.createLayerObject(type, properties), properties); } /** @@ -171,16 +156,11 @@ createLayerNode(const std::string &type, */ std::unique_ptr createLayerNode(std::unique_ptr &&layer, - const std::vector &properties, - const ml::train::LayerComputeEngine &compute_engine) { + const std::vector &properties) { auto lnode = std::make_unique(std::move(layer)); lnode->setProperty(properties); - if (compute_engine == ml::train::LayerComputeEngine::GPU) { - lnode->setComputeEngine(compute_engine); - } - return lnode; } @@ -192,10 +172,10 @@ LayerNode::LayerNode(std::unique_ptr &&l) : output_connections(), run_context(nullptr), - layer_node_props( - new PropsType(props::Name(), props::Distribute(), props::Trainable(), {}, - {}, props::SharedFrom(), props::ClipGradByGlobalNorm(), - props::Packed(), props::LossScaleForMixed())), + layer_node_props(new PropsType( + props::Name(), props::Distribute(), props::Trainable(), {}, {}, + props::SharedFrom(), 
props::ClipGradByGlobalNorm(), props::Packed(), + props::LossScaleForMixed(), props::ComputeEngine())), layer_node_props_realization( new RealizationPropsType(props::Flatten(), props::Activation())), loss(new props::Loss()), @@ -670,6 +650,10 @@ InitLayerContext LayerNode::finalize(const std::vector &input_dims, if (!std::get(*layer_node_props).empty()) loss_scale = std::get(*layer_node_props).get(); + if (!std::get(*layer_node_props).empty()) { + compute_engine = std::get(*layer_node_props).get(); + } + if (!std::get(*layer_node_props).empty()) { bool isPacked = std::get(*layer_node_props); if (!isPacked) { diff --git a/nntrainer/layers/layer_node.h b/nntrainer/layers/layer_node.h index 9ed105d28a..49a2f230b0 100644 --- a/nntrainer/layers/layer_node.h +++ b/nntrainer/layers/layer_node.h @@ -53,6 +53,7 @@ class InputConnection; class ClipGradByGlobalNorm; class Packed; class LossScaleForMixed; +class ComputeEngine; } // namespace props /** @@ -994,11 +995,12 @@ will also contain the properties of the layer. The properties will be copied upon final creation. Editing properties of the layer after init will not the properties in the context/graph unless intended. */ - using PropsType = std::tuple, - std::vector, - props::SharedFrom, props::ClipGradByGlobalNorm, - props::Packed, props::LossScaleForMixed>; + using PropsType = + std::tuple, + std::vector, props::SharedFrom, + props::ClipGradByGlobalNorm, props::Packed, + props::LossScaleForMixed, props::ComputeEngine>; using RealizationPropsType = std::tuple; /** these realization properties results in addition of new layers, hence @@ -1070,9 +1072,7 @@ properties in the context/graph unless intended. 
*/ */ std::unique_ptr createLayerNode(const ml::train::LayerType &type, - const std::vector &properties = {}, - const ml::train::LayerComputeEngine &compute_engine = - ml::train::LayerComputeEngine::CPU); + const std::vector &properties = {}); /** * @brief LayerNode creator with constructor @@ -1082,9 +1082,7 @@ createLayerNode(const ml::train::LayerType &type, */ std::unique_ptr createLayerNode(const std::string &type, - const std::vector &properties = {}, - const ml::train::LayerComputeEngine &compute_engine = - ml::train::LayerComputeEngine::CPU); + const std::vector &properties = {}); /** * @brief LayerNode creator with constructor @@ -1095,9 +1093,7 @@ createLayerNode(const std::string &type, */ std::unique_ptr createLayerNode(std::unique_ptr &&layer, - const std::vector &properties, - const ml::train::LayerComputeEngine &compute_engine = - ml::train::LayerComputeEngine::CPU); + const std::vector &properties); } // namespace nntrainer #endif // __LAYER_NODE_H__ diff --git a/nntrainer/meson.build b/nntrainer/meson.build index ed15b8f2a7..22e5748704 100644 --- a/nntrainer/meson.build +++ b/nntrainer/meson.build @@ -17,6 +17,8 @@ nntrainer_headers = [ meson.current_source_dir() / 'nntrainer_logger.h', meson.current_source_dir() / 'nntrainer_error.h', meson.current_source_dir() / 'app_context.h', + meson.current_source_dir() / 'context.h', + meson.current_source_dir() / 'engine.h', ] # Dependencies @@ -59,7 +61,8 @@ endforeach nntrainer_common_sources = [ 'nntrainer_logger.cpp', - 'app_context.cpp' + 'app_context.cpp', + 'engine.cpp' ] if get_option('enable-opencl') diff --git a/nntrainer/models/model_loader.cpp b/nntrainer/models/model_loader.cpp index 286ed17e43..699474f811 100644 --- a/nntrainer/models/model_loader.cpp +++ b/nntrainer/models/model_loader.cpp @@ -57,8 +57,8 @@ int ModelLoader::loadLearningRateSchedulerConfigIni( parseProperties(ini, "LearningRateScheduler", {"type"}); try { - auto lrs = app_context.createObject( - lrs_type, properties); + auto 
lrs = + ct_engine.createLearningRateSchedulerObject(lrs_type, properties); auto opt_wrapped = std::static_pointer_cast(optimizer); opt_wrapped->setLearningRateScheduler(std::move(lrs)); } catch (std::exception &e) { @@ -423,7 +423,7 @@ int ModelLoader::loadFromIni(std::string ini_file, NeuralNetwork &model, ml_logd("parsing graph started"); try { std::unique_ptr ini_interpreter = - std::make_unique(app_context, + std::make_unique(ct_engine, path_resolver); auto graph_representation = ini_interpreter->deserialize(ini_file); @@ -449,8 +449,9 @@ int ModelLoader::loadFromIni(std::string ini_file, NeuralNetwork &model, * @brief load all properties from context */ int ModelLoader::loadFromContext(NeuralNetwork &model) { - auto props = app_context.getProperties(); - model.setTrainConfig(props); + /// @todo: Property for Context needs to updated + // auto props = app_context.getProperties(); + // model.setTrainConfig(props); return ML_ERROR_NONE; } @@ -460,15 +461,15 @@ int ModelLoader::loadFromContext(NeuralNetwork &model) { */ int ModelLoader::loadFromConfig(std::string config, NeuralNetwork &model) { - if (model_file_context != nullptr) { + if (model_file_engine != nullptr) { ml_loge( - "model_file_context is already initialized, there is a possiblity that " + "model_file_engine is already initialized, there is a possiblity that " "last load from config wasn't finished correctly, and model loader is " "reused"); return ML_ERROR_UNKNOWN; } - model_file_context = std::make_unique(); + model_file_engine = std::make_unique(); auto config_realpath_char = getRealpath(config.c_str(), nullptr); if (config_realpath_char == nullptr) { @@ -489,12 +490,12 @@ int ModelLoader::loadFromConfig(std::string config, NeuralNetwork &model) { } auto base_path = config_realpath.substr(0, pos); - model_file_context->setWorkingDirectory(base_path); + model_file_engine->setWorkingDirectory(base_path); ml_logd("for the current model working directory is set to %s", base_path.c_str()); int status 
= loadFromConfig(config_realpath, model, false); - model_file_context.reset(); + model_file_engine.reset(); return status; } diff --git a/nntrainer/models/model_loader.h b/nntrainer/models/model_loader.h index 1c2032b5e9..3d8062dc4a 100644 --- a/nntrainer/models/model_loader.h +++ b/nntrainer/models/model_loader.h @@ -17,7 +17,7 @@ #include -#include +#include #include #include @@ -34,9 +34,8 @@ class ModelLoader { /** * @brief Constructor of the model loader */ - ModelLoader(const AppContext &app_context_ = AppContext::Global()) : - app_context(app_context_), - model_file_context(nullptr) {} + ModelLoader(const Engine &ct_eng_ = Engine::Global()) : + ct_engine(ct_eng_), model_file_engine(nullptr) {} /** * @brief Destructor of the model loader @@ -135,8 +134,8 @@ class ModelLoader { * @return const std::string resolved path. */ const std::string resolvePath(const std::string &path) { - auto app_context_resolved_path = app_context.getWorkingPath(path); - return model_file_context->getWorkingPath(app_context_resolved_path); + auto path_ = ct_engine.getWorkingPath(path); + return model_file_engine->getWorkingPath(path_); } /** @@ -152,11 +151,11 @@ class ModelLoader { const char *unknown = "Unknown"; const char *none = "none"; - AppContext app_context; - std::unique_ptr - model_file_context; /**< model_file specific context which is - referred to as if app_context cannot - resolve some given configuration */ + Engine ct_engine; + std::unique_ptr model_file_engine; + /**< model_file specific context which is + // referred to as if app_context cannot + // resolve some given configuration */ }; } /* namespace nntrainer */ diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp index bff18c2ddd..4a776a9d4c 100644 --- a/nntrainer/models/neuralnet.cpp +++ b/nntrainer/models/neuralnet.cpp @@ -81,10 +81,10 @@ NeuralNetwork::NeuralNetwork() : compiled(false), loadedFromConfig(false), exec_mode(ExecutionMode::TRAIN) { - app_context = 
AppContext(AppContext::Global()); + ct_engine = Engine(Engine::Global()); } -NeuralNetwork::NeuralNetwork(AppContext app_context_) : +NeuralNetwork::NeuralNetwork(Engine ct_engine_) : model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm(), props::LossScale()), model_flex_props( @@ -101,7 +101,7 @@ NeuralNetwork::NeuralNetwork(AppContext app_context_) : compiled(false), loadedFromConfig(false), exec_mode(ExecutionMode::TRAIN), - app_context(app_context_) {} + ct_engine(ct_engine_) {} int NeuralNetwork::loadFromConfig(const std::string &config) { if (loadedFromConfig == true) { @@ -109,7 +109,7 @@ int NeuralNetwork::loadFromConfig(const std::string &config) { return ML_ERROR_INVALID_PARAMETER; } - ModelLoader loader(app_context); + ModelLoader loader(ct_engine); NeuralNetwork tempNet(*this); int status = loader.loadFromContext(tempNet); diff --git a/nntrainer/models/neuralnet.h b/nntrainer/models/neuralnet.h index b79378138c..1e7348aab5 100644 --- a/nntrainer/models/neuralnet.h +++ b/nntrainer/models/neuralnet.h @@ -33,10 +33,10 @@ #include #endif -#include #include #include #include +#include #include #include #include @@ -92,7 +92,7 @@ class NeuralNetwork : public ml::train::Model { /** * @brief Constructor of NeuralNetwork Class */ - NeuralNetwork(AppContext app_context_); + NeuralNetwork(Engine ct_engine_); /** * @brief Destructor of NeuralNetwork Class @@ -625,11 +625,12 @@ s * @retval shared_ptr const std::string file_path) override; private: - using FlexiblePropTypes = std::tuple< - props::Epochs, props::TrainingBatchSize, props::SavePath, - props::ContinueTrain, props::SaveBestPath, props::MemoryOptimization, - props::MemorySwap, props::MemorySwapPath, props::MemorySwapLookahead, - props::TensorFormat, props::ModelTensorDataType>; + using FlexiblePropTypes = + std::tuple; using RigidPropTypes = std::tuple, std::vector, props::ClipGradByGlobalNorm, @@ -676,9 +677,10 @@ s * @retval shared_ptr ExecutionMode exec_mode; /** execution mode : train : 
inference */ - AppContext app_context; /** Configurations bound to current app */ + Engine ct_engine; /** Configurations bound to current engine */ + + NetworkGraph model_graph; /** Network Model Graph */ - NetworkGraph model_graph; /** Network Model Graph */ GraphRepresentation graph_representation; /** Unsorted graph representation */ DynamicTrainingOptimization dynamic_training_opt; /**< Dynamic fine-tuning diff --git a/nntrainer/optimizers/optimizer_wrapped.cpp b/nntrainer/optimizers/optimizer_wrapped.cpp index c8eec44f5f..2663a7fe69 100644 --- a/nntrainer/optimizers/optimizer_wrapped.cpp +++ b/nntrainer/optimizers/optimizer_wrapped.cpp @@ -12,8 +12,8 @@ * @details wraps the optimizer and learning rate scheduler together */ -#include #include +#include #include #include #include @@ -28,9 +28,8 @@ namespace nntrainer { std::unique_ptr createOptimizerWrapped(const ml::train::OptimizerType &type, const std::vector &properties) { - auto &ac = nntrainer::AppContext::Global(); - return createOptimizerWrapped(ac.createObject(type), - properties); + auto &eg = nntrainer::Engine::Global(); + return createOptimizerWrapped(eg.createOptimizerObject(type), properties); } /** @@ -39,9 +38,8 @@ createOptimizerWrapped(const ml::train::OptimizerType &type, std::unique_ptr createOptimizerWrapped(const std::string &type, const std::vector &properties) { - auto &ac = nntrainer::AppContext::Global(); - return createOptimizerWrapped(ac.createObject(type), - properties); + auto &eg = nntrainer::Engine::Global(); + return createOptimizerWrapped(eg.createOptimizerObject(type), properties); } /** diff --git a/nntrainer/utils/base_properties.h b/nntrainer/utils/base_properties.h index d6ceb710c1..c02ea029ad 100644 --- a/nntrainer/utils/base_properties.h +++ b/nntrainer/utils/base_properties.h @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -720,6 +721,26 @@ class TensorFormat final : public EnumProperty { }; }; +/** + * @brief Enumeration of Run Engine type + 
*/ +struct ComputeEngineTypeInfo { + using Enum = ml::train::LayerComputeEngine; + static constexpr std::initializer_list EnumList = {Enum::CPU, Enum::GPU, + Enum::QNN}; + static constexpr const char *EnumStr[] = {"cpu", "gpu", "qnn"}; +}; + +/** + * @brief ComputeEngine Enumeration Information + * + */ +class ComputeEngine final : public EnumProperty { +public: + using prop_tag = enum_class_prop_tag; + static constexpr const char *key = "engine"; +}; + // /** // * @brief trainable property, use this to set and check how if certain layer // is diff --git a/nntrainer/utils/node_exporter.cpp b/nntrainer/utils/node_exporter.cpp index 40cb945cda..7f566e1da4 100644 --- a/nntrainer/utils/node_exporter.cpp +++ b/nntrainer/utils/node_exporter.cpp @@ -92,7 +92,7 @@ void Exporter::saveTflResult( std::vector, std::vector, props::SharedFrom, props::ClipGradByGlobalNorm, props::Packed, - props::LossScaleForMixed> &props, + props::LossScaleForMixed, props::ComputeEngine> &props, const LayerNode *self) { createIfNull(tf_node); tf_node->setLayerNode(*self); diff --git a/nntrainer/utils/node_exporter.h b/nntrainer/utils/node_exporter.h index c4c618a859..04e5ac0a54 100644 --- a/nntrainer/utils/node_exporter.h +++ b/nntrainer/utils/node_exporter.h @@ -249,7 +249,7 @@ void Exporter::saveTflResult( std::vector, std::vector, props::SharedFrom, props::ClipGradByGlobalNorm, props::Packed, - props::LossScaleForMixed> &props, + props::LossScaleForMixed, props::ComputeEngine> &props, const LayerNode *self); class BatchNormalizationLayer; diff --git a/packaging/nntrainer.spec b/packaging/nntrainer.spec index 0f42a44051..e863513c98 100644 --- a/packaging/nntrainer.spec +++ b/packaging/nntrainer.spec @@ -562,6 +562,8 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/ %{_includedir}/nntrainer/operation_layer.h # custom layer kits %{_includedir}/nntrainer/app_context.h +%{_includedir}/nntrainer/context.h +%{_includedir}/nntrainer/engine.h # optimizer headers 
%{_includedir}/nntrainer/optimizer_context.h %{_includedir}/nntrainer/optimizer_devel.h diff --git a/test/ccapi/unittest_ccapi.cpp b/test/ccapi/unittest_ccapi.cpp index a8d3fb4bd6..1630be928b 100644 --- a/test/ccapi/unittest_ccapi.cpp +++ b/test/ccapi/unittest_ccapi.cpp @@ -662,7 +662,7 @@ TEST(nntrainer_ccapi, model_copy_02_p) { */ int main(int argc, char **argv) { try { - nntrainer::AppContext::Global().setWorkingDirectory(getTestResPath("")); + nntrainer::Engine::Global().setWorkingDirectory(getTestResPath("")); } catch (std::invalid_argument &e) { std::cout << "failed to get test res path\n"; } diff --git a/test/nntrainer_test_util.cpp b/test/nntrainer_test_util.cpp index 7ff307558d..898a6baf8d 100644 --- a/test/nntrainer_test_util.cpp +++ b/test/nntrainer_test_util.cpp @@ -237,14 +237,19 @@ getResPath(const std::string &filename, nntrainer::GraphRepresentation makeGraph(const std::vector &layer_reps) { - static auto &ac = nntrainer::AppContext::Global(); + static auto &eg = nntrainer::Engine::Global(); + // #ifdef ENABLE_QNN + // static auto &qc = nntrainer::QNNContext::Global(); + // #endif nntrainer::GraphRepresentation graph_rep; for (const auto &layer_representation : layer_reps) { /// @todo Use unique_ptr here std::shared_ptr layer = nntrainer::createLayerNode( - ac.createObject(layer_representation.first), + eg.createLayerObject(layer_representation.first, + layer_representation.second), layer_representation.second); + graph_rep.push_back(layer); } @@ -255,14 +260,15 @@ nntrainer::GraphRepresentation makeCompiledGraph( const std::vector &layer_reps, std::vector> &realizers, const std::string &loss_layer) { - static auto &ac = nntrainer::AppContext::Global(); + static auto &eg = nntrainer::Engine::Global(); nntrainer::GraphRepresentation graph_rep; auto model_graph = nntrainer::NetworkGraph(); for (auto &layer_representation : layer_reps) { std::shared_ptr layer = nntrainer::createLayerNode( - ac.createObject(layer_representation.first), + 
eg.createLayerObject(layer_representation.first, + layer_representation.second), layer_representation.second); graph_rep.push_back(layer); } diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp index 355513c094..cb41432f2a 100644 --- a/test/tizen_capi/unittest_tizen_capi.cpp +++ b/test/tizen_capi/unittest_tizen_capi.cpp @@ -1622,7 +1622,7 @@ TEST(nntrainer_capi_nnmodel, get_input_output_dimension_06_n) { */ int main(int argc, char **argv) { try { - nntrainer::AppContext::Global().setWorkingDirectory(getTestResPath("")); + nntrainer::Engine::Global().setWorkingDirectory(getTestResPath("")); } catch (std::invalid_argument &e) { ml_loge("Failed to get test res path\n"); } diff --git a/test/unittest/compiler/unittest_interpreter.cpp b/test/unittest/compiler/unittest_interpreter.cpp index 23c631d1c5..859e4f737a 100644 --- a/test/unittest/compiler/unittest_interpreter.cpp +++ b/test/unittest/compiler/unittest_interpreter.cpp @@ -29,7 +29,7 @@ using LayerRepresentation = std::pair>; auto ini_interpreter = std::make_shared( - nntrainer::AppContext::Global(), compilerPathResolver); + nntrainer::Engine::Global(), compilerPathResolver); /** * @brief nntrainer Interpreter Test setup diff --git a/test/unittest/layers/layers_dependent_common_tests.cpp b/test/unittest/layers/layers_dependent_common_tests.cpp index d3c0666031..a87db956fa 100644 --- a/test/unittest/layers/layers_dependent_common_tests.cpp +++ b/test/unittest/layers/layers_dependent_common_tests.cpp @@ -25,12 +25,14 @@ constexpr unsigned SAMPLE_TRIES = 10; TEST_P(LayerSemantics, createFromAppContext_pn) { - auto &ac = nntrainer::AppContext::Global(); + auto &eg = nntrainer::Engine::Global(); + auto ac = + static_cast(eg.getRegisteredContext("cpu")); if (!(options & LayerCreateSetPropertyOptions::AVAILABLE_FROM_APP_CONTEXT)) { - ac.registerFactory(std::get<0>(GetParam())); + ac->registerFactory(std::get<0>(GetParam())); } - EXPECT_EQ(ac.createObject(expected_type)->getType(), 
+ EXPECT_EQ(ac->createObject(expected_type)->getType(), expected_type); } @@ -143,15 +145,13 @@ TEST_P(LayerSemanticsGpu, createFromClContext_pn) { // } TEST_P(LayerSemanticsGpu, setPropertiesInvalid_n) { - auto lnode = - nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU); + auto lnode = nntrainer::createLayerNode(expected_type, {"engine=gpu"}); /** must not crash */ EXPECT_THROW(layer->setProperty({"unknown_props=2"}), std::invalid_argument); } TEST_P(LayerSemanticsGpu, finalizeValidateLayerNode_p) { - auto lnode = - nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU); + auto lnode = nntrainer::createLayerNode(expected_type, {"engine=gpu"}); std::vector props = {"name=test"}; std::string input_shape = "input_shape=1:1:1"; std::string input_layers = "input_layers=a"; @@ -181,8 +181,7 @@ TEST_P(LayerSemanticsGpu, finalizeValidateLayerNode_p) { } TEST_P(LayerSemanticsGpu, getTypeValidateLayerNode_p) { - auto lnode = - nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU); + auto lnode = nntrainer::createLayerNode(expected_type, {"engine=gpu"}); std::string type; EXPECT_NO_THROW(type = lnode->getType()); @@ -190,8 +189,7 @@ TEST_P(LayerSemanticsGpu, getTypeValidateLayerNode_p) { } TEST_P(LayerSemanticsGpu, gettersValidateLayerNode_p) { - auto lnode = - nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU); + auto lnode = nntrainer::createLayerNode(expected_type, {"engine=gpu"}); EXPECT_NO_THROW(lnode->supportInPlace()); EXPECT_NO_THROW(lnode->requireLabel()); @@ -199,8 +197,7 @@ TEST_P(LayerSemanticsGpu, gettersValidateLayerNode_p) { } TEST_P(LayerSemanticsGpu, setBatchValidateLayerNode_p) { - auto lnode = - nntrainer::createLayerNode(expected_type, {}, ComputeEngine::GPU); + auto lnode = nntrainer::createLayerNode(expected_type, {"engine=gpu"}); std::vector props = {"name=test"}; std::string input_shape = "input_shape=1:1:1"; std::string input_layers = "input_layers=a"; diff --git 
a/test/unittest/unittest_nntrainer_appcontext.cpp b/test/unittest/unittest_nntrainer_appcontext.cpp index c7249a4dc7..c5e92d9b65 100644 --- a/test/unittest/unittest_nntrainer_appcontext.cpp +++ b/test/unittest/unittest_nntrainer_appcontext.cpp @@ -63,14 +63,14 @@ class nntrainerAppContextDirectory : public ::testing::Test { }; TEST_F(nntrainerAppContextDirectory, readFromGetPath_p) { - nntrainer::AppContext ac = nntrainer::AppContext::Global(); + nntrainer::Engine eg = nntrainer::Engine::Global(); - std::string path = ac.getWorkingPath("testfile.txt"); + std::string path = eg.getWorkingPath("testfile.txt"); EXPECT_EQ(path, "testfile.txt"); - ac.setWorkingDirectory("testdir"); + eg.setWorkingDirectory("testdir"); - path = ac.getWorkingPath("testfile.txt"); + path = eg.getWorkingPath("testfile.txt"); EXPECT_EQ(path, current_directory + "/testdir/testfile.txt"); std::ifstream file(path); @@ -80,17 +80,17 @@ TEST_F(nntrainerAppContextDirectory, readFromGetPath_p) { file.close(); - path = ac.getWorkingPath("/absolute/path"); + path = eg.getWorkingPath("/absolute/path"); EXPECT_EQ(path, "/absolute/path"); - path = ac.getWorkingPath(""); + path = eg.getWorkingPath(""); EXPECT_EQ(path, current_directory + "/testdir"); } TEST_F(nntrainerAppContextDirectory, notExisitingSetDirectory_n) { - nntrainer::AppContext ac = nntrainer::AppContext::Global(); + nntrainer::Engine eg = nntrainer::Engine::Global(); - EXPECT_THROW(ac.setWorkingDirectory("testdir_does_not_exist"), + EXPECT_THROW(eg.setWorkingDirectory("testdir_does_not_exist"), std::invalid_argument); } diff --git a/test/unittest/unittest_nntrainer_internal.cpp b/test/unittest/unittest_nntrainer_internal.cpp index fc44549cea..e3b3073746 100644 --- a/test/unittest/unittest_nntrainer_internal.cpp +++ b/test/unittest/unittest_nntrainer_internal.cpp @@ -36,8 +36,9 @@ */ TEST(nntrainer_Optimizer, create_01_p) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_NO_THROW(op = ac.createObject("adam", {})); 
+ auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_NO_THROW(op = ac->createOptimizerObject("adam", {})); } /** @@ -45,9 +46,9 @@ TEST(nntrainer_Optimizer, create_01_p) { */ TEST(nntrainer_Optimizer, create_02_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW( - op = ac.createObject("adam", {"unknown"})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = ac->createOptimizerObject("adam", {"unknown"})); } /** @@ -55,9 +56,9 @@ TEST(nntrainer_Optimizer, create_02_n) { */ TEST(nntrainer_Optimizer, create_03_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW(op = - ac.createObject("adam", {"lr=0.1"})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = ac->createOptimizerObject("adam", {"lr=0.1"})); } /** @@ -65,9 +66,10 @@ TEST(nntrainer_Optimizer, create_03_n) { */ TEST(nntrainer_Optimizer, create_04_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW( - op = ac.createObject("adam", {"learning_rate:0.1"})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = + ac->createOptimizerObject("adam", {"learning_rate:0.1"})); } /** @@ -75,8 +77,9 @@ TEST(nntrainer_Optimizer, create_04_n) { */ TEST(nntrainer_Optimizer, create_05_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_NO_THROW(op = ac.createObject("sgd", {})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_NO_THROW(op = ac->createOptimizerObject("sgd", {})); } /** @@ -84,9 +87,9 @@ TEST(nntrainer_Optimizer, create_05_n) { */ TEST(nntrainer_Optimizer, create_06_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW(op = - ac.createObject("sgd", {"lr=0.1"})); + auto &eg = 
nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = ac->createOptimizerObject("sgd", {"lr=0.1"})); } /** @@ -94,9 +97,11 @@ TEST(nntrainer_Optimizer, create_06_n) { */ TEST(nntrainer_Optimizer, create_07_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW( - op = ac.createObject("sgd", {"learning_rate:0.1"})); + // auto &ac = nntrainer::AppContext::Global(); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = + ac->createOptimizerObject("sgd", {"learning_rate:0.1"})); } /** @@ -104,9 +109,9 @@ TEST(nntrainer_Optimizer, create_07_n) { */ TEST(nntrainer_Optimizer, create_08_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW(op = - ac.createObject("sgd", {"unknown"})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = ac->createOptimizerObject("sgd", {"unknown"})); } /** @@ -114,9 +119,9 @@ TEST(nntrainer_Optimizer, create_08_n) { */ TEST(nntrainer_Optimizer, create_09_n) { std::unique_ptr op; - auto &ac = nntrainer::AppContext::Global(); - EXPECT_ANY_THROW( - op = ac.createObject("non-existing type", {})); + auto &eg = nntrainer::Engine::Global(); + auto ac = eg.getRegisteredContext("cpu"); + EXPECT_ANY_THROW(op = ac->createOptimizerObject("non-existing type", {})); } TEST(nntrainer_throw_if, throw_invalid_arg_p) { diff --git a/test/unittest/unittest_nntrainer_lr_scheduler.cpp b/test/unittest/unittest_nntrainer_lr_scheduler.cpp index 20f2df25f7..8d49a45bb8 100644 --- a/test/unittest/unittest_nntrainer_lr_scheduler.cpp +++ b/test/unittest/unittest_nntrainer_lr_scheduler.cpp @@ -25,8 +25,8 @@ static std::unique_ptr createLRS(const std::string &type) { - auto &ac = nntrainer::AppContext::Global(); - auto lrs = ac.createObject(type); + auto &eg = nntrainer::Engine::Global(); + auto lrs = eg.createLearningRateSchedulerObject(type, {}); 
auto lrs_ptr = static_cast(lrs.release()); return std::unique_ptr(lrs_ptr); } diff --git a/test/unittest/unittest_nntrainer_modelfile.cpp b/test/unittest/unittest_nntrainer_modelfile.cpp index 3f0dbb9b64..c5fdd7ec1b 100644 --- a/test/unittest/unittest_nntrainer_modelfile.cpp +++ b/test/unittest/unittest_nntrainer_modelfile.cpp @@ -41,12 +41,11 @@ class nntrainerIniTest public: static void SetUpTestCase() { - nntrainer::AppContext::Global().setWorkingDirectory( - getResPath("", {"test"})); + nntrainer::Engine::Global().setWorkingDirectory(getResPath("", {"test"})); } static void TearDownTestCase() { - nntrainer::AppContext::Global().unsetWorkingDirectory(); + nntrainer::Engine::Global().unsetWorkingDirectory(); } protected: @@ -758,9 +757,9 @@ TEST(nntrainerIniTest, backbone_based_on_working_directory_p) { {nw_base_cross + "loss=mse", adam, input, backbone_valid + "input_layers=inputlayer"}}; - nntrainer::AppContext ac(nntrainer::AppContext::Global()); - ac.setWorkingDirectory(getResPath("", {"test"})); - nntrainer::NeuralNetwork NN(ac); + nntrainer::Engine eg(nntrainer::Engine::Global()); + eg.setWorkingDirectory(getResPath("", {"test"})); + nntrainer::NeuralNetwork NN(eg); EXPECT_EQ(NN.loadFromConfig(s.getIniName()), ML_ERROR_NONE); }