[onert-micro] Reduce duplicate code in pool layers
This commit reduces duplicate code in the pool layers.
  - Introduce Pool2DCommon.h, which holds the functions shared by the pool layers
    (a reconstructed sketch of this header follows the changed-file summary below).
    - configure_kernel_CirclePool2DCommon: unifies the configure functions of the pool layers.
    - createPoolParams: unifies the construction of the pool layers' runtime parameters.
  - Apply the common functions to the pool layers.

ONE-DCO-1.0-Signed-off-by: ragmani <[email protected]>
ragmani committed Jan 9, 2024
1 parent 3dc37ed commit 63d9907
Showing 4 changed files with 144 additions and 198 deletions.
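
The fourth changed file, the new Pool2DCommon.h, did not load in this view. The sketch below is a reconstruction, not the file itself: the two helper names come from the commit message, and the bodies are recovered from the duplicated code that the three diffs below delete. The include set, the header-guard name, and the Params.h include for luci_interpreter_pal::PoolParams are assumptions.

#ifndef LUCI_INTERPRETER_KERNELS_POOL2DCOMMON_H
#define LUCI_INTERPRETER_KERNELS_POOL2DCOMMON_H

#include "Builders.h"
#include "Params.h" // luci_interpreter_pal::PoolParams; exact header name is an assumption
#include "SISOKernel.h"
#include "kernels/Utils.h"

namespace luci_interpreter
{

// Shared validation, reconstructed from the configure bodies deleted below.
// MaxPool2D's former U8/S16 scale and zero-point checks are omitted here because
// the real file is not visible; they may also live in this function.
inline void configure_kernel_CirclePool2DCommon(const circle::Operator *cur_op,
                                                BaseRuntimeGraph *runtime_graph)
{
  const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);

  LUCI_INTERPRETER_CHECK(Tensor::element_type(siso_kernel.input()) ==
                         Tensor::element_type(siso_kernel.output()));
  LUCI_INTERPRETER_CHECK(Tensor::num_dims(siso_kernel.input()) == 4);
  LUCI_INTERPRETER_CHECK(Tensor::num_dims(siso_kernel.input()) ==
                         Tensor::num_dims(siso_kernel.output()));
}

// Shared parameter construction, reconstructed from the execute bodies deleted below.
inline luci_interpreter_pal::PoolParams createPoolParams(const circle::Operator *cur_op,
                                                         BaseRuntimeGraph *runtime_graph)
{
  const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);
  const auto input = siso_kernel.input();
  const auto output = siso_kernel.output();
  const auto *options = cur_op->builtin_options_as_Pool2DOptions();

  const int32_t input_height = Tensor::dim(input, 1);
  const int32_t input_width = Tensor::dim(input, 2);

  const int32_t output_height = kernels::computeOutputSize(
    luci_padding(options->padding()), input_height, options->filter_height(), options->stride_h());
  const int32_t output_width = kernels::computeOutputSize(
    luci_padding(options->padding()), input_width, options->filter_width(), options->stride_w());

  const auto padding_height = kernels::computePadding(options->stride_h(), 1, input_height,
                                                      options->filter_height(), output_height);
  const auto padding_width = kernels::computePadding(options->stride_w(), 1, input_width,
                                                     options->filter_width(), output_width);

  const DataType input_type = Tensor::element_type(input);

  float activation_min{};
  float activation_max{};
  int32_t quantized_activation_min{};
  int32_t quantized_activation_max{};

  if (input_type == DataType::S8 or input_type == DataType::S16)
  {
#ifndef DIS_QUANT
    kernels::calculateActivationRangeQuantized(luci_actfunc(options->fused_activation_function()),
                                               output, &quantized_activation_min,
                                               &quantized_activation_max);
#endif // DIS_QUANT
  }
  else if (input_type == DataType::FLOAT32)
  {
#ifndef DIS_FLOAT
    kernels::calculateActivationRange(luci_actfunc(options->fused_activation_function()),
                                      &activation_min, &activation_max);
#endif // DIS_FLOAT
  }
  else
  {
    assert(false && "Not supported type");
  }

  luci_interpreter_pal::PoolParams params{};
  params.padding_values.height = padding_height;
  params.padding_values.width = padding_width;
  params.stride_height = options->stride_h();
  params.stride_width = options->stride_w();
  params.filter_height = options->filter_height();
  params.filter_width = options->filter_width();
  params.float_activation_min = activation_min;
  params.float_activation_max = activation_max;
  params.quantized_activation_max = quantized_activation_max;
  params.quantized_activation_min = quantized_activation_min;
  return params;
}

} // namespace luci_interpreter

#endif // LUCI_INTERPRETER_KERNELS_POOL2DCOMMON_H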
78 changes: 6 additions & 72 deletions onert-micro/luci-interpreter/src/kernels/AveragePool2D.cpp
@@ -14,9 +14,8 @@
* limitations under the License.
*/

#include "Builders.h"
#include "Pool2DCommon.h"

#include "kernels/Utils.h"
#include "PALAveragePool2D.h"

namespace luci_interpreter
@@ -26,88 +25,23 @@ namespace luci_interpreter
void configure_kernel_CircleAveragePool2D(const circle::Operator *cur_op,
BaseRuntimeGraph *runtime_graph)
{
const auto input_index = cur_op->inputs()->operator[](0);
const auto output_index = cur_op->outputs()->operator[](0);

assert(input_index != -1);
assert(output_index != -1);

const auto input = runtime_graph->getCircleTensorByIndex(input_index);
const auto output = runtime_graph->getCircleTensorByIndex(output_index);

LUCI_INTERPRETER_CHECK(Tensor::element_type(input) == Tensor::element_type(output));
LUCI_INTERPRETER_CHECK(Tensor::num_dims(input) == 4);
configure_kernel_CirclePool2DCommon(cur_op, runtime_graph);
}

void execute_kernel_CircleAveragePool2D(const circle::Operator *cur_op,
BaseRuntimeGraph *runtime_graph)
{
const auto input_index = cur_op->inputs()->operator[](0);
const auto output_index = cur_op->outputs()->operator[](0);

assert(input_index != -1);
assert(output_index != -1);

const auto input = runtime_graph->getCircleTensorByIndex(input_index);
auto output = runtime_graph->getCircleTensorByIndex(output_index);

const auto *options = cur_op->builtin_options_as_Pool2DOptions();
const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);

const int32_t input_height = Tensor::dim(input, 1);
const int32_t input_width = Tensor::dim(input, 2);

const int32_t output_height = kernels::computeOutputSize(
luci_padding(options->padding()), input_height, options->filter_height(), options->stride_h());
const int32_t output_width = kernels::computeOutputSize(
luci_padding(options->padding()), input_width, options->filter_width(), options->stride_w());

const auto padding_height = kernels::computePadding(options->stride_h(), 1, input_height,
options->filter_height(), output_height);
const auto padding_width = kernels::computePadding(options->stride_w(), 1, input_width,
options->filter_width(), output_width);
const auto input = siso_kernel.input();
const auto output = siso_kernel.output();

const auto *input_data = runtime_graph->getDataByTensor(input);
auto *output_data = runtime_graph->getDataByTensor(output);

const DataType input_type = Tensor::element_type(input);

float activation_min{};
float activation_max{};

int32_t quantized_activation_min{};
int32_t quantized_activation_max{};

if (input_type == DataType::S8 or input_type == DataType::S16)
{
#ifndef DIS_QUANT
kernels::calculateActivationRangeQuantized(luci_actfunc(options->fused_activation_function()),
output, &quantized_activation_min,
&quantized_activation_max);
#endif // DIS_QUANT
}
else if (input_type == DataType::FLOAT32)
{
#ifndef DIS_FLOAT
kernels::calculateActivationRange(luci_actfunc(options->fused_activation_function()),
&activation_min, &activation_max);
#endif // DIS_FLOAT
}
else
{
assert(false && "Not supported type");
}

luci_interpreter_pal::PoolParams params{};
params.padding_values.height = padding_height;
params.padding_values.width = padding_width;
params.stride_height = options->stride_h();
params.stride_width = options->stride_w();
params.filter_height = options->filter_height();
params.filter_width = options->filter_width();
params.float_activation_min = activation_min;
params.float_activation_max = activation_max;
params.quantized_activation_max = quantized_activation_max;
params.quantized_activation_min = quantized_activation_min;
const auto params = createPoolParams(cur_op, runtime_graph);

switch (input_type)
{
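
createPoolParams leans on kernels::computeOutputSize and kernels::computePadding, visible in the deleted code above. These are the usual TFLite-style formulas; the minimal self-contained sketch below assumes the onert-micro versions match them (dilation is fixed to 1 for pooling).

#include <cassert>
#include <cstdint>

enum class Padding { SAME, VALID }; // mirrors luci's Padding for this sketch

// Output size of a pooling window sweep; assumed to match kernels::computeOutputSize.
int32_t computeOutputSize(Padding padding, int32_t image_size, int32_t filter_size,
                          int32_t stride, int32_t dilation_rate = 1)
{
  const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  switch (padding)
  {
    case Padding::SAME: // pad as needed so every input position is covered
      return (image_size + stride - 1) / stride;
    case Padding::VALID: // only windows that fit entirely inside the input
      return (image_size + stride - effective_filter_size) / stride;
  }
  assert(false && "unknown padding");
  return 0;
}

// Per-side padding so that out_size windows fit; negative padding clamps to 0.
// Assumed to match kernels::computePadding.
int32_t computePadding(int32_t stride, int32_t dilation_rate, int32_t in_size,
                       int32_t filter_size, int32_t out_size)
{
  const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  const int32_t padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2;
  return padding > 0 ? padding : 0;
}

For example, a 5-wide input with a 2-wide filter, stride 2, and SAME padding yields computeOutputSize(...) = (5 + 1) / 2 = 3 and computePadding(...) = ((3 - 1) * 2 + 2 - 5) / 2 = 0.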
46 changes: 4 additions & 42 deletions onert-micro/luci-interpreter/src/kernels/L2Pool2D.cpp
@@ -15,69 +15,31 @@
* limitations under the License.
*/

#include "Builders.h"
#include "SISOKernel.h"
#include "kernels/Utils.h"
#include "Pool2DCommon.h"

#include "PALL2Pool2D.h"

namespace luci_interpreter
{
void configure_kernel_CircleL2Pool2D(const circle::Operator *cur_op,
BaseRuntimeGraph *runtime_graph)
{
const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);

LUCI_INTERPRETER_CHECK(Tensor::element_type(siso_kernel.input()) ==
Tensor::element_type(siso_kernel.output()));
LUCI_INTERPRETER_CHECK(Tensor::num_dims(siso_kernel.input()) == 4);
LUCI_INTERPRETER_CHECK(Tensor::num_dims(siso_kernel.input()) ==
Tensor::num_dims(siso_kernel.output()));
configure_kernel_CirclePool2DCommon(cur_op, runtime_graph);
}

void execute_kernel_CircleL2Pool2D(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
{
const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);

const auto *options = cur_op->builtin_options_as_Pool2DOptions();

const auto input = siso_kernel.input();
const auto output = siso_kernel.output();

const int32_t input_height = Tensor::dim(input, 1);
const int32_t input_width = Tensor::dim(input, 2);

const int32_t output_height = kernels::computeOutputSize(
luci_padding(options->padding()), input_height, options->filter_height(), options->stride_h());
const int32_t output_width = kernels::computeOutputSize(
luci_padding(options->padding()), input_width, options->filter_width(), options->stride_w());

const auto padding_height = kernels::computePadding(options->stride_h(), 1, input_height,
options->filter_height(), output_height);
const auto padding_width = kernels::computePadding(options->stride_w(), 1, input_width,
options->filter_width(), output_width);

const auto *input_data = runtime_graph->getDataByTensor(input);
auto *output_data = runtime_graph->getDataByTensor(output);

const DataType input_type = Tensor::element_type(input);

float activation_min{};
float activation_max{};

#ifndef DIS_FLOAT
kernels::calculateActivationRange(luci_actfunc(options->fused_activation_function()),
&activation_min, &activation_max);
#endif // DIS_FLOAT

luci_interpreter_pal::PoolParams params{};
params.padding_values.height = padding_height;
params.padding_values.width = padding_width;
params.stride_height = options->stride_h();
params.stride_width = options->stride_w();
params.filter_height = options->filter_height();
params.filter_width = options->filter_width();
params.float_activation_min = activation_min;
params.float_activation_max = activation_max;
const auto params = createPoolParams(cur_op, runtime_graph);

switch (input_type)
{
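
The switch that closes execute_kernel_CircleL2Pool2D is collapsed in this view. L2Pool2D has only a float path in the removed code above, so the dispatch plausibly looks like the sketch below; the luci_interpreter_pal::L2Pool entry point and the kernels::getTensorShape / getTensorData helpers are assumptions based on the included PALL2Pool2D.h.

switch (input_type)
{
#ifndef DIS_FLOAT
  case DataType::FLOAT32:
    // Float-only kernel: params carries float_activation_min/max from createPoolParams.
    luci_interpreter_pal::L2Pool(params, kernels::getTensorShape(input),
                                 kernels::getTensorData<float>(input_data),
                                 kernels::getTensorShape(output),
                                 kernels::getTensorData<float>(output_data));
    break;
#endif // DIS_FLOAT
  default:
    assert(false && "Unsupported type");
}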
90 changes: 6 additions & 84 deletions onert-micro/luci-interpreter/src/kernels/MaxPool2D.cpp
@@ -14,109 +14,31 @@
* limitations under the License.
*/

#include "Builders.h"
#include "Pool2DCommon.h"

#include "kernels/Utils.h"
#include "PALMaxPool2D.h"

namespace luci_interpreter
{
void configure_kernel_CircleMaxPool2D(const circle::Operator *cur_op,
BaseRuntimeGraph *runtime_graph)
{
const auto input_index = cur_op->inputs()->operator[](0);
const auto output_index = cur_op->outputs()->operator[](0);

assert(input_index != -1);
assert(output_index != -1);

const auto input = runtime_graph->getCircleTensorByIndex(input_index);
const auto output = runtime_graph->getCircleTensorByIndex(output_index);

LUCI_INTERPRETER_CHECK(Tensor::element_type(input) == Tensor::element_type(output));
LUCI_INTERPRETER_CHECK(Tensor::num_dims(input) == 4);

#ifndef DIS_QUANT
if (Tensor::element_type(input) == DataType::U8)
{
LUCI_INTERPRETER_CHECK(std::abs(Tensor::scale(output) - Tensor::scale(input)) <= 1.0e-6);
LUCI_INTERPRETER_CHECK(Tensor::zero_point(output) == Tensor::zero_point(input));
}
else if (Tensor::element_type(input) == DataType::S16)
{
LUCI_INTERPRETER_CHECK(std::abs(Tensor::scale(output) - Tensor::scale(input)) <= 1.0e-6);
LUCI_INTERPRETER_CHECK(Tensor::zero_point(input) == 0 && Tensor::zero_point(output) == 0);
}
#endif // DIS_QUANT
configure_kernel_CirclePool2DCommon(cur_op, runtime_graph);
}

void execute_kernel_CircleMaxPool2D(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
{
const auto input_index = cur_op->inputs()->operator[](0);
const auto output_index = cur_op->outputs()->operator[](0);

assert(input_index != -1);
assert(output_index != -1);

const auto input = runtime_graph->getCircleTensorByIndex(input_index);
auto output = runtime_graph->getCircleTensorByIndex(output_index);

const auto *options = cur_op->builtin_options_as_Pool2DOptions();

const int32_t input_height = Tensor::dim(input, 1);
const int32_t input_width = Tensor::dim(input, 2);
const kernels::SISOKernel siso_kernel(cur_op, runtime_graph);

const int32_t output_height = kernels::computeOutputSize(
luci_padding(options->padding()), input_height, options->filter_height(), options->stride_h());
const int32_t output_width = kernels::computeOutputSize(
luci_padding(options->padding()), input_width, options->filter_width(), options->stride_w());

const auto padding_height = kernels::computePadding(options->stride_h(), 1, input_height,
options->filter_height(), output_height);
const auto padding_width = kernels::computePadding(options->stride_w(), 1, input_width,
options->filter_width(), output_width);
const auto input = siso_kernel.input();
const auto output = siso_kernel.output();

const auto *input_data = runtime_graph->getDataByTensor(input);
auto *output_data = runtime_graph->getDataByTensor(output);

const DataType input_type = Tensor::element_type(input);

float activation_min{};
float activation_max{};
int32_t quantized_activation_min{};
int32_t quantized_activation_max{};

if (input_type == DataType::S8 or input_type == DataType::S16)
{
#ifndef DIS_QUANT
kernels::calculateActivationRangeQuantized(luci_actfunc(options->fused_activation_function()),
output, &quantized_activation_min,
&quantized_activation_max);
#endif // DIS_QUANT
}
else if (input_type == DataType::FLOAT32)
{
#ifndef DIS_FLOAT
kernels::calculateActivationRange(luci_actfunc(options->fused_activation_function()),
&activation_min, &activation_max);
#endif // DIS_FLOAT
}
else
{
assert(false && "Not supported type");
}

luci_interpreter_pal::PoolParams params{};
params.padding_values.height = padding_height;
params.padding_values.width = padding_width;
params.stride_height = options->stride_h();
params.stride_width = options->stride_w();
params.filter_height = options->filter_height();
params.filter_width = options->filter_width();
params.float_activation_min = activation_min;
params.float_activation_max = activation_max;
params.quantized_activation_max = quantized_activation_max;
params.quantized_activation_min = quantized_activation_min;
const auto params = createPoolParams(cur_op, runtime_graph);

switch (input_type)
{
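
MaxPool2D's collapsed switch also covers quantized tensors; the float branch mirrors the L2Pool2D sketch above, and the quantized branch plausibly looks like this (the luci_interpreter_pal::MaxPool overload for int8_t is an assumption based on the included PALMaxPool2D.h):

#ifndef DIS_QUANT
  case DataType::S8:
    // Quantized path: params carries quantized_activation_min/max, computed
    // from the output scale and zero point by createPoolParams.
    luci_interpreter_pal::MaxPool(params, kernels::getTensorShape(input),
                                  kernels::getTensorData<int8_t>(input_data),
                                  kernels::getTensorShape(output),
                                  kernels::getTensorData<int8_t>(output_data));
    break;
#endif // DIS_QUANT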
128 changes: 128 additions & 0 deletions onert-micro/luci-interpreter/src/kernels/Pool2DCommon.h (new file; its diff did not load in this view, and a reconstructed sketch appears after the changed-file summary above)
