diff --git a/nntrainer/layers/layer_context.h b/nntrainer/layers/layer_context.h
index fe3938f753..e5c6759638 100644
--- a/nntrainer/layers/layer_context.h
+++ b/nntrainer/layers/layer_context.h
@@ -172,8 +172,7 @@ class InitLayerContext {
   /**
    * @brief Request a new weight for the layer
    *
-   * @param dim_v dimension of Variagble of the weight
-   * @param dim_g dimension of Gradient of the weight
+   * @param dim dimension of Variable of the weight
    * @param init initializer for the weight
    * @param reg regularizer for the weight
    * @param reg_const regularization constant for the weight
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 62eececf0f..d0e542825f 100644
--- a/nntrainer/models/neuralnet.cpp
+++ b/nntrainer/models/neuralnet.cpp
@@ -65,12 +65,13 @@ namespace nntrainer {
 
 NeuralNetwork::NeuralNetwork() :
-  model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm()),
+  model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm(),
+              props::LossScale()),
   model_flex_props(
     props::Epochs(), props::TrainingBatchSize(), props::SavePath(),
     props::ContinueTrain(), props::SaveBestPath(), props::MemoryOptimization(),
     props::MemorySwap(), props::MemorySwapPath(), props::MemorySwapLookahead(),
-    props::TensorFormat(), props::ModelTensorDataType(), props::LossScale()),
+    props::TensorFormat(), props::ModelTensorDataType()),
   load_path(std::string()),
   epoch_idx(0),
   iter(0),
@@ -83,12 +84,13 @@ NeuralNetwork::NeuralNetwork() :
 }
 
 NeuralNetwork::NeuralNetwork(AppContext app_context_) :
-  model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm()),
+  model_props(props::LossType(), {}, {}, props::ClipGradByGlobalNorm(),
+              props::LossScale()),
   model_flex_props(
     props::Epochs(), props::TrainingBatchSize(), props::SavePath(),
     props::ContinueTrain(), props::SaveBestPath(), props::MemoryOptimization(),
     props::MemorySwap(), props::MemorySwapPath(), props::MemorySwapLookahead(),
-    props::TensorFormat(), props::ModelTensorDataType(), props::LossScale()),
+    props::TensorFormat(), props::ModelTensorDataType()),
   load_path(std::string()),
   epoch_idx(0),
   iter(0),
@@ -179,9 +181,8 @@ int NeuralNetwork::compile() {
 
   const std::string tensor_type =
     to_string(std::get<props::ModelTensorDataType>(model_flex_props));
-  const float loss_scale = std::get<props::LossScale>(model_flex_props);
   model_graph = NetworkGraph(memory_swap, memory_swap_path, lookahead,
-                             tensor_format, tensor_type, loss_scale);
+                             tensor_format, tensor_type);
 
   model_graph.setMemoryOptimizations(
     std::get<props::MemoryOptimization>(model_flex_props));
diff --git a/nntrainer/models/neuralnet.h b/nntrainer/models/neuralnet.h
index a2923ae8a7..da1571a328 100644
--- a/nntrainer/models/neuralnet.h
+++ b/nntrainer/models/neuralnet.h
@@ -624,14 +624,16 @@ s * @retval shared_ptr
                const std::string file_path) override;
 
 private:
-  using FlexiblePropTypes = std::tuple<
-    props::Epochs, props::TrainingBatchSize, props::SavePath,
-    props::ContinueTrain, props::SaveBestPath, props::MemoryOptimization,
-    props::MemorySwap, props::MemorySwapPath, props::MemorySwapLookahead,
-    props::TensorFormat, props::ModelTensorDataType, props::LossScale>;
+  using FlexiblePropTypes =
+    std::tuple<props::Epochs, props::TrainingBatchSize, props::SavePath,
+               props::ContinueTrain, props::SaveBestPath,
+               props::MemoryOptimization, props::MemorySwap,
+               props::MemorySwapPath, props::MemorySwapLookahead,
+               props::TensorFormat, props::ModelTensorDataType>;
   using RigidPropTypes =
     std::tuple<props::LossType, std::vector<props::InputConnection>,
-               std::vector<props::LabelLayer>, props::ClipGradByGlobalNorm>;
+               std::vector<props::LabelLayer>, props::ClipGradByGlobalNorm,
+               props::LossScale>;
 
   RigidPropTypes model_props;         /**< model props */
   FlexiblePropTypes model_flex_props; /**< model train props */
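Note on the change above: LossScale moves from the model's flexible property tuple (mutable between runs) to the rigid one, so it is now fixed when the model object is constructed, and NetworkGraph no longer receives it in compile(). What follows is a minimal, self-contained sketch of the tuple-of-property-types pattern and the std::get<T> lookup this diff relies on; the props:: structs below are illustrative stand-ins, not nntrainer's actual definitions.

#include <iostream>
#include <tuple>

// Illustrative stand-ins for nntrainer's property types (assumed shapes,
// not the real props:: definitions).
namespace props {
struct LossScale {
  float value = 1.0f; /**< scale factor applied to the loss */
};
struct Epochs {
  unsigned int value = 1; /**< number of training epochs */
};
} // namespace props

// Rigid props are fixed once the model object exists; flexible props may
// still be re-set before training. The diff moves LossScale to the former.
using RigidPropTypes = std::tuple<props::LossScale>;
using FlexiblePropTypes = std::tuple<props::Epochs>;

int main() {
  RigidPropTypes model_props{props::LossScale{65536.0f}};
  FlexiblePropTypes model_flex_props{props::Epochs{10}};

  // Properties are looked up by type, mirroring the std::get<props::...>
  // calls in NeuralNetwork::compile() above.
  const float loss_scale = std::get<props::LossScale>(model_props).value;
  const unsigned int epochs = std::get<props::Epochs>(model_flex_props).value;

  std::cout << "loss_scale=" << loss_scale << " epochs=" << epochs << '\n';
  return 0;
}

Because each property is a distinct type, std::get<T> selects it unambiguously at compile time; moving a property between the two tuples changes only where it may be set, not how it is read.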