[Wait for #2849] [ Context ] Add Engine Class to manage Contexts #2848

Open · wants to merge 2 commits into base: main
9 changes: 6 additions & 3 deletions Applications/Custom/LayerClient/jni/main.cpp
@@ -20,6 +20,7 @@
 
 /// @todo Migrate this to api
 #include <app_context.h>
+#include <engine.h>
 
 #include <mae_loss.h>
 #include <pow.h>
@@ -195,13 +196,15 @@ int main(int argc, char *argv[]) {
   }
 
   try {
-    auto &app_context = nntrainer::AppContext::Global();
+    auto &ct_engine = nntrainer::Engine::Global();
+    auto app_context = static_cast<nntrainer::AppContext *>(
+      ct_engine.getRegisteredContext("cpu"));
     /// registering custom layer here
     /// registerFactory expects a function that returns unique_ptr<Layer> from
     /// std::vector<std::string> ml::train::createLayer<T> is a templated
     /// function for generic usage
-    app_context.registerFactory(nntrainer::createLayer<custom::PowLayer>);
-    app_context.registerFactory(nntrainer::createLayer<custom::MaeLossLayer>);
+    app_context->registerFactory(nntrainer::createLayer<custom::PowLayer>);
+    app_context->registerFactory(nntrainer::createLayer<custom::MaeLossLayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register factory, reason: " << e.what()
               << std::endl;
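The same pattern recurs in every application below, so here is a minimal self-contained sketch of the new registration flow, assembled only from names visible in the hunks of this diff (Engine::Global, getRegisteredContext("cpu"), the AppContext downcast, registerFactory); it is a sketch, not a verbatim excerpt from the tree:

// sketch_register.cpp: registering a custom layer through the Engine.
#include <app_context.h>
#include <engine.h>
#include <pow.h>

#include <iostream>
#include <stdexcept>

int registerCustomLayers() {
  try {
    // Engine is now the global entry point instead of AppContext::Global().
    auto &ct_engine = nntrainer::Engine::Global();

    // Contexts are looked up by backend name; "cpu" yields the AppContext
    // that clients previously reached directly.
    auto app_context = static_cast<nntrainer::AppContext *>(
      ct_engine.getRegisteredContext("cpu"));

    // Factory registration itself is unchanged, now through a pointer.
    app_context->registerFactory(nntrainer::createLayer<custom::PowLayer>);
  } catch (std::invalid_argument &e) {
    std::cerr << "failed to register factory, reason: " << e.what() << '\n';
    return 1;
  }
  return 0;
}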
10 changes: 6 additions & 4 deletions Applications/LLaMA/jni/main.cpp
@@ -26,6 +26,7 @@
 
 #include <app_context.h>
 #include <custom_multi_head_attention_layer.h>
+#include <engine.h>
 #include <rms_norm.h>
 #include <rotary_embedding.h>
 #include <swiglu.h>
@@ -723,18 +724,19 @@ int main(int argc, char *argv[]) {
 #else
   std::string text = "This is sample input for LLaMA.";
 #endif
-
-  auto &app_context = nntrainer::AppContext::Global();
+  auto &ct_engine = nntrainer::Engine::Global();
+  auto app_context =
+    static_cast<nntrainer::AppContext *>(ct_engine.getRegisteredContext("cpu"));
   try {
-    app_context.registerFactory(nntrainer::createLayer<custom::SwiGLULayer>);
+    app_context->registerFactory(nntrainer::createLayer<custom::SwiGLULayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register factory, reason: " << e.what()
               << std::endl;
     return 1;
   }
 
   try {
-    app_context.registerFactory(nntrainer::createLayer<custom::RMSNormLayer>);
+    app_context->registerFactory(nntrainer::createLayer<custom::RMSNormLayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register factory, reason: " << e.what()
               << std::endl;
7 changes: 5 additions & 2 deletions Applications/SimpleShot/task_runner.cpp
@@ -18,6 +18,7 @@
 #include <unistd.h>
 
 #include <app_context.h>
+#include <engine.h>
 #include <model.h>
 #include <nntrainer-api-common.h>
 
@@ -183,7 +184,9 @@ std::unique_ptr<ml::train::Model> createModel(const std::string &backbone,
  * @return int
  */
 int main(int argc, char **argv) {
-  auto &app_context = nntrainer::AppContext::Global();
+  auto &ct_engine = nntrainer::Engine::Global();
+  auto app_context =
+    static_cast<nntrainer::AppContext *>(ct_engine.getRegisteredContext("cpu"));
 
   if (argc != 6 && argc != 5) {
     std::cout
@@ -221,7 +224,7 @@ int main(int argc, char **argv) {
   std::string val_path = app_path + "/tasks/" + argv[4];
 
   try {
-    app_context.registerFactory(
+    app_context->registerFactory(
       nntrainer::createLayer<simpleshot::layers::CenteringLayer>);
   } catch (std::exception &e) {
     std::cerr << "registering factory failed: " << e.what();
@@ -39,7 +39,7 @@
 #include <stdlib.h>
 #include <time.h>
 
-#include <app_context.h>
+#include <engine.h>
 #include <neuralnet.h>
 #include <tensor.h>
 
@@ -375,7 +375,7 @@ int main(int argc, char *argv[]) {
 
   /// @todo add api version of this
   try {
-    nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+    nntrainer::Engine::Global().setWorkingDirectory(data_path);
   } catch (std::invalid_argument &e) {
     std::cerr << "setting data_path failed, pwd is used instead";
   }
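This hunk and the next make the same one-line swap; side by side, with data_path as in the surrounding code (a sketch of the call shape only):

// Before: the working directory was configured on the global AppContext.
nntrainer::AppContext::Global().setWorkingDirectory(data_path);

// After: it is configured once on the global Engine, which presumably
// propagates it to the contexts the Engine manages.
nntrainer::Engine::Global().setWorkingDirectory(data_path);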
@@ -39,7 +39,7 @@
 
 #include <bitmap_helpers.h>
 
-#include <app_context.h>
+#include <engine.h>
 #define TRAINING true
 
 /**
@@ -254,7 +254,7 @@ int main(int argc, char *argv[]) {
 
   /// @todo add api version of this
   try {
-    nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+    nntrainer::Engine::Global().setWorkingDirectory(data_path);
   } catch (std::invalid_argument &e) {
     std::cerr << "setting data_path failed, pwd is used instead";
   }
11 changes: 8 additions & 3 deletions Applications/YOLOv2/jni/main.cpp
@@ -21,6 +21,7 @@
 
 #include <app_context.h>
 #include <det_dataloader.h>
+#include <engine.h>
 #include <layer.h>
 #include <model.h>
 #include <optimizer.h>
@@ -285,9 +286,13 @@ int main(int argc, char *argv[]) {
             << std::endl;
 
   try {
-    auto &app_context = nntrainer::AppContext::Global();
-    app_context.registerFactory(nntrainer::createLayer<custom::ReorgLayer>);
-    app_context.registerFactory(
+    auto &ct_engine = nntrainer::Engine::Global();
+
+    auto app_context = static_cast<nntrainer::AppContext *>(
+      ct_engine.getRegisteredContext("cpu"));
+
+    app_context->registerFactory(nntrainer::createLayer<custom::ReorgLayer>);
+    app_context->registerFactory(
       nntrainer::createLayer<custom::YoloV2LossLayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register factory, reason: " << e.what()
14 changes: 10 additions & 4 deletions Applications/YOLOv3/jni/main.cpp
@@ -20,6 +20,7 @@
 
 #include <app_context.h>
 #include <det_dataloader.h>
+#include <engine.h>
 #include <layer.h>
 #include <model.h>
 #include <optimizer.h>
@@ -402,17 +403,22 @@ int main(int argc, char *argv[]) {
             << std::endl;
 
   try {
-    auto &app_context = nntrainer::AppContext::Global();
-    app_context.registerFactory(nntrainer::createLayer<custom::UpsampleLayer>);
+    auto &ct_engine = nntrainer::Engine::Global();
+    auto app_context = static_cast<nntrainer::AppContext *>(
+      ct_engine.getRegisteredContext("cpu"));
+
+    app_context->registerFactory(nntrainer::createLayer<custom::UpsampleLayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register factory, reason: " << e.what()
               << std::endl;
     return 1;
   }
 
   try {
-    auto &app_context = nntrainer::AppContext::Global();
-    app_context.registerFactory(
+    auto &ct_engine = nntrainer::Engine::Global();
+    auto app_context = static_cast<nntrainer::AppContext *>(
+      ct_engine.getRegisteredContext("cpu"));
+    app_context->registerFactory(
       nntrainer::createLayer<custom::YoloV3LossLayer>);
   } catch (std::invalid_argument &e) {
     std::cerr << "failed to register yolov3 loss, reason: " << e.what()
9 changes: 9 additions & 0 deletions api/ccapi/include/common.h
@@ -43,6 +43,15 @@ enum class ExecutionMode {
   VALIDATE /** Validate mode, label is necessary */
 };
 
+/**
+ * @brief Enumeration of layer compute engine
+ */
+enum LayerComputeEngine {
+  CPU, /**< CPU as the compute engine */
+  GPU, /**< GPU as the compute engine */
+  QNN, /**< QNN as the compute engine */
+};
+
 /**
  * @brief Get the version of NNTrainer
  */
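The enum moves from layer.h (below) into common.h and gains a QNN entry, so it is visible to non-layer code without pulling in layer.h. A small usage sketch, assuming common.h sits in the ml::train namespace like the other ccapi headers:

#include <common.h>

int main() {
  // LayerComputeEngine is an unscoped enum, so both spellings are valid.
  ml::train::LayerComputeEngine e1 = ml::train::LayerComputeEngine::QNN;
  ml::train::LayerComputeEngine e2 = ml::train::CPU;
  return e1 == e2 ? 1 : 0;
}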
63 changes: 24 additions & 39 deletions api/ccapi/include/layer.h
@@ -114,14 +114,6 @@ enum LayerType {
   LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN /**< Unknown */
 };
 
-/**
- * @brief Enumeration of layer compute engine
- */
-enum LayerComputeEngine {
-  CPU, /**< CPU as the compute engine */
-  GPU, /**< GPU as the compute engine */
-};
-
 /**
  * @class Layer Base class for layers
  * @brief Base class for all layers
@@ -261,16 +253,14 @@ class Layer {
  */
 std::unique_ptr<Layer>
 createLayer(const LayerType &type,
-            const std::vector<std::string> &properties = {},
-            const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU);
+            const std::vector<std::string> &properties = {});
 
 /**
  * @brief Factory creator with constructor for layer
  */
 std::unique_ptr<Layer>
 createLayer(const std::string &type,
-            const std::vector<std::string> &properties = {},
-            const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU);
+            const std::vector<std::string> &properties = {});
 
 /**
  * @brief General Layer Factory function to register Layer
@@ -343,37 +333,35 @@ DivideLayer(const std::vector<std::string> &properties = {}) {
 /**
  * @brief Helper function to create fully connected layer
  */
-inline std::unique_ptr<Layer> FullyConnected(
-  const std::vector<std::string> &properties = {},
-  const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_FC, properties, compute_engine);
+inline std::unique_ptr<Layer>
+FullyConnected(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_FC, properties);
 }
 
 /**
  * @brief Helper function to create Swiglu layer
  */
 inline std::unique_ptr<Layer>
-Swiglu(const std::vector<std::string> &properties = {},
-       const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_SWIGLU, properties, compute_engine);
+Swiglu(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_SWIGLU, properties);
 }
 
-/**
- * @brief Helper function to create RMS normalization layer for GPU
- */
-inline std::unique_ptr<Layer>
-RMSNormCl(const std::vector<std::string> &properties = {},
-          const LayerComputeEngine &compute_engine = LayerComputeEngine::GPU) {
-  return createLayer(LayerType::LAYER_RMSNORM, properties, compute_engine);
-}
+// /**
+//  * @brief Helper function to create RMS normalization layer for GPU
+//  */
+// inline std::unique_ptr<Layer>
+// RMSNormCl(const std::vector<std::string> &properties = {},
+//           const LayerComputeEngine &compute_engine = LayerComputeEngine::GPU)
+// {
+//   return createLayer(LayerType::LAYER_RMSNORM, properties, compute_engine);
+// }
 
 /**
  * @brief Helper function to create Transpose layer
  */
 inline std::unique_ptr<Layer>
-Transpose(const std::vector<std::string> &properties = {},
-          const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_TRANSPOSE, properties, compute_engine);
+Transpose(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_TRANSPOSE, properties);
 }
 
 /**
@@ -428,27 +416,24 @@ Flatten(const std::vector<std::string> &properties = {}) {
  * @brief Helper function to create reshape layer
  */
 inline std::unique_ptr<Layer>
-Reshape(const std::vector<std::string> &properties = {},
-        const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_RESHAPE, properties, compute_engine);
+Reshape(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_RESHAPE, properties);
 }
 
 /**
  * @brief Helper function to create addition layer
  */
 inline std::unique_ptr<Layer>
-Addition(const std::vector<std::string> &properties = {},
-         const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_ADDITION, properties, compute_engine);
+Addition(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_ADDITION, properties);
 }
 
 /**
  * @brief Helper function to create concat layer
  */
 inline std::unique_ptr<Layer>
-Concat(const std::vector<std::string> &properties = {},
-       const LayerComputeEngine &compute_engine = LayerComputeEngine::CPU) {
-  return createLayer(LayerType::LAYER_CONCAT, properties, compute_engine);
+Concat(const std::vector<std::string> &properties = {}) {
+  return createLayer(LayerType::LAYER_CONCAT, properties);
 }
 
 /**
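Call sites that passed a LayerComputeEngine to these helpers must drop that argument. A sketch of the resulting call shape (the property strings are illustrative, not taken from this diff):

#include <layer.h>

int main() {
  // Backend selection no longer rides on a per-call argument; layers are
  // created against the default context, and backends are resolved through
  // the Engine instead.
  auto fc  = ml::train::FullyConnected({"unit=10"});
  auto cat = ml::train::Concat({"axis=3"});
  auto rs  = ml::train::Reshape({"target_shape=1:1:10"});
  return (fc && cat && rs) ? 0 : 1;
}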
20 changes: 9 additions & 11 deletions api/ccapi/src/factory.cpp
@@ -15,9 +15,9 @@
 #include <string>
 #include <vector>
 
-#include <app_context.h>
 #include <databuffer.h>
 #include <databuffer_factory.h>
+#include <engine.h>
 #include <layer.h>
 #include <model.h>
 #include <neuralnet.h>
@@ -31,18 +31,16 @@ namespace ml {
 namespace train {
 
 std::unique_ptr<Layer> createLayer(const LayerType &type,
-                                   const std::vector<std::string> &properties,
-                                   const LayerComputeEngine &compute_engine) {
-  return nntrainer::createLayerNode(type, properties, compute_engine);
+                                   const std::vector<std::string> &properties) {
+  return nntrainer::createLayerNode(type, properties);
 }
 
 /**
  * @brief Factory creator with constructor for layer
  */
 std::unique_ptr<Layer> createLayer(const std::string &type,
-                                   const std::vector<std::string> &properties,
-                                   const LayerComputeEngine &compute_engine) {
-  return nntrainer::createLayerNode(type, properties, compute_engine);
+                                   const std::vector<std::string> &properties) {
+  return nntrainer::createLayerNode(type, properties);
 }
 
 std::unique_ptr<Optimizer>
@@ -129,8 +127,8 @@ createDataset(DatasetType type, datagen_cb cb, void *user_data,
 std::unique_ptr<ml::train::LearningRateScheduler>
 createLearningRateScheduler(const LearningRateSchedulerType &type,
                             const std::vector<std::string> &properties) {
-  auto &ac = nntrainer::AppContext::Global();
-  return ac.createObject<ml::train::LearningRateScheduler>(type, properties);
+  auto &eg = nntrainer::Engine::Global();
+  return eg.createLearningRateSchedulerObject(type, properties);
 }
 
 /**
@@ -139,8 +137,8 @@ createLearningRateScheduler(const LearningRateSchedulerType &type,
 std::unique_ptr<ml::train::LearningRateScheduler>
 createLearningRateScheduler(const std::string &type,
                             const std::vector<std::string> &properties) {
-  auto &ac = nntrainer::AppContext::Global();
-  return ac.createObject<ml::train::LearningRateScheduler>(type, properties);
+  auto &eg = nntrainer::Engine::Global();
+  return eg.createLearningRateSchedulerObject(type, properties);
 }
 
 std::string getVersion() {
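The public createLearningRateScheduler signatures are unchanged; only the backing singleton moves from AppContext::createObject&lt;T&gt; to the Engine's dedicated factory, so existing callers keep compiling. A sketch of such a call, assuming the CONSTANT scheduler type and learning_rate property already exposed by the ccapi:

#include <optimizer.h> // assumed home of LearningRateSchedulerType in ccapi

int main() {
  auto lrs = ml::train::createLearningRateScheduler(
    ml::train::LearningRateSchedulerType::CONSTANT, {"learning_rate=0.001"});
  return lrs ? 0 : 1;
}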
2 changes: 2 additions & 0 deletions debian/nntrainer-dev.install
@@ -31,6 +31,8 @@
 /usr/include/nntrainer/loss_layer.h
 # custom layer kits
 /usr/include/nntrainer/app_context.h
+/usr/include/nntrainer/context.h
+/usr/include/nntrainer/engine.h
 # logger
 /usr/include/nntrainer/nntrainer_log.h
 /usr/include/nntrainer/nntrainer_logger.h