diff --git a/runtime/onert/backend/train/TensorManager.cc b/runtime/onert/backend/train/TensorManager.cc
index d8404fcc9ed..03c36c5dc73 100644
--- a/runtime/onert/backend/train/TensorManager.cc
+++ b/runtime/onert/backend/train/TensorManager.cc
@@ -58,7 +58,8 @@ TensorManager::TensorManager(const std::shared_ptr<TensorRegistry> &reg, uint32_
     _trainable_mgr{new TrainableMemoryManager(optim_vars_count)},
     _back_prop_mgr{new MemoryManager()}, _gradient_mgr{new MemoryManager()},
     // TODO Find a suitable planner of disposable tensors to reduce peak memory usage
-    _disposable_back_prop_mgr{new DisposableMemoryManager()}, _tensors{reg}
+    _disposable_back_prop_mgr{new DisposableMemoryManager()},
+    _layer_scope_mgr{new LayerScopeMemoryManager()}, _tensors{reg}
 {
   // DO NOTHING
 }
@@ -106,6 +107,12 @@ void TensorManager::allocateDisposableBackPropTensors()
                  std::string{"DISPOSABLE BACK_PROP TENSOR "});
 }
 
+void TensorManager::allocateLayerScopeTensors()
+{
+  allocateMemory(_layer_scope_mgr.get(), _tensors->layer_scope_tensors(),
+                 std::string{" LAYERSCOPE TENSOR "});
+}
+
 void TensorManager::claimNonConstPlan(const ir::OperandIndex &index)
 {
   auto tensor = _tensors->getNonConstTensor(index);
@@ -187,6 +194,19 @@ void TensorManager::releaseDisposableBackPropPlan(const DisposableTensorIndex &i
   _disposable_back_prop_mgr->releasePlan(index);
 }
 
+void TensorManager::claimLayerScopePlan(const LayerScopeTensorIndex &index)
+{
+  const auto tensor = _tensors->getLayerScopeTensor(index);
+
+  auto size = alignedSize(tensor->total_size(), _align);
+  _layer_scope_mgr->claimPlan(index, size);
+}
+
+void TensorManager::releaseLayerScopePlan(const LayerScopeTensorIndex &index)
+{
+  _layer_scope_mgr->releasePlan(index);
+}
+
 } // namespace train
 } // namespace backend
 } // namespace onert
diff --git a/runtime/onert/backend/train/TensorManager.h b/runtime/onert/backend/train/TensorManager.h
index c9553c3913e..faec36b8016 100644
--- a/runtime/onert/backend/train/TensorManager.h
+++ b/runtime/onert/backend/train/TensorManager.h
@@ -49,6 +49,7 @@ class TensorManager
   void allocateBackPropTensors();
   void allocateGradientTensors();
   void allocateDisposableBackPropTensors();
+  void allocateLayerScopeTensors();
   // TODO Add member functions to deallocate tensors
 
   void claimNonConstPlan(const ir::OperandIndex &ind);
@@ -61,7 +62,8 @@
   void releaseGradientPlan(const ir::OperandIndex &ind);
   void claimDisposableBackPropPlan(const DisposableTensorIndex &ind);
   void releaseDisposableBackPropPlan(const DisposableTensorIndex &ind);
-  // TODO Add member functions related to LayerScopeMemoryManager
+  void claimLayerScopePlan(const LayerScopeTensorIndex &ind);
+  void releaseLayerScopePlan(const LayerScopeTensorIndex &ind);
 
 private:
   std::unique_ptr<MemoryManager> _nonconst_mgr;
@@ -69,8 +71,7 @@
   std::unique_ptr<MemoryManager> _back_prop_mgr;
   std::unique_ptr<MemoryManager> _gradient_mgr;
   std::unique_ptr<DisposableMemoryManager> _disposable_back_prop_mgr;
-  // TODO: enable _layer_scope_mgr
-  // std::unique_ptr<LayerScopeMemoryManager> _layer_scope_mgr;
+  std::unique_ptr<LayerScopeMemoryManager> _layer_scope_mgr;
   const std::shared_ptr<TensorRegistry> _tensors;
 };
 
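For context, below is a minimal sketch of how a planning pass in the train backend might drive the new entry points. The wrapper function, its name, and the way the list of LayerScopeTensorIndex values is obtained are assumptions for illustration; only the claimLayerScopePlan / releaseLayerScopePlan / allocateLayerScopeTensors calls come from this diff.

// Hypothetical driver for the new layer-scope planning API (not part of this PR).
// The helper name/signature and the index list are assumptions; the TensorManager
// calls are the ones introduced above.
#include <vector>

#include "TensorManager.h"

namespace onert
{
namespace backend
{
namespace train
{

// Plan the lifetime of each layer-scope tensor, then allocate the planned memory.
void planLayerScopeTensors(TensorManager &tensor_manager,
                           const std::vector<LayerScopeTensorIndex> &indices)
{
  // Claim a plan while each tensor is live during the backward pass...
  for (const auto &index : indices)
    tensor_manager.claimLayerScopePlan(index);

  // ...and release it once the tensor is no longer needed, so the memory manager
  // can overlap regions of tensors with disjoint lifetimes.
  for (const auto &index : indices)
    tensor_manager.releaseLayerScopePlan(index);

  // With all claim/release pairs recorded, allocate the actual buffers.
  tensor_manager.allocateLayerScopeTensors();
}

} // namespace train
} // namespace backend
} // namespace onert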