Skip to content

Commit

Permalink
[onert-micro] Reduce SpaceToBatchND code duplication
Browse files Browse the repository at this point in the history
This PR reduces code duplication between SpaceToBatchND and BatchToSpaceND.

ONE-DCO-1.0-Signed-off-by: Artem Balyshev <[email protected]>
  • Loading branch information
Artem Balyshev committed Jun 25, 2024
1 parent 41cca4a commit dd8f03a
Show file tree
Hide file tree
Showing 11 changed files with 313 additions and 301 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ONERT_MICRO_EXECUTE_KERNELS_SPACES_BATCHES_ND_COMMON_H
#define ONERT_MICRO_EXECUTE_KERNELS_SPACES_BATCHES_ND_COMMON_H

#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMKernelData.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMUtils.h"
#include "execute/OMRuntimeKernel.h"
#include <functional>

namespace onert_micro
{
namespace execute
{

// Shared execution driver for the SpaceToBatchND and BatchToSpaceND kernels.
//
// The two kernels have an identical tensor layout (input, block_shape,
// paddings/crops -> output), so the per-kernel files supply only `f`, a
// functor that forwards to the concrete float PAL implementation. `f`
// receives the runtime shape and raw data pointer for each of the three
// inputs plus the output, and returns the PAL status.
//
// NOTE(review): the functor signature is float-only; presumably other dtypes
// are rejected inside the common driver — confirm in the .cpp.
OMStatus execute_spaces_batches_nd_common(
const OMExecuteArgs &execute_args,
const std::function<
OMStatus(const core::OMRuntimeShape &unextended_input1_shape, const float *input1_data,
const core::OMRuntimeShape &unextended_input2_shape, const int32_t *block_shape_data,
const core::OMRuntimeShape &unextended_input3_shape, const int32_t *crops_data,
const core::OMRuntimeShape &unextended_output_shape, float *output_data)> &f);

} // namespace execute
} // namespace onert_micro

#endif // ONERT_MICRO_EXECUTE_KERNELS_SPACES_BATCHES_ND_COMMON_H
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_SPACES_BATCHES_ND_COMMON_H
#define ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_SPACES_BATCHES_ND_COMMON_H

#include "import/OMKernelConfigureBuilder.h"
#include "core/OMUtils.h"
#include "OMStatus.h"
#include "execute/OMRuntimeKernel.h"

namespace onert_micro
{
namespace import
{
namespace helpers
{

// Shared import-time configuration for SpaceToBatchND and BatchToSpaceND:
// both kernels share the same operand layout, so a single validation routine
// covers them. Returns Ok on success, an error status otherwise.
OMStatus configure_spaces_batches_nd_kernel_common(const OMConfigureArgs &config_args);

} // namespace helpers
} // namespace import
} // namespace onert_micro

#endif // ONERT_MICRO_IMPORT_HELPERS_CONFIGURE_SPACES_BATCHES_ND_COMMON_H
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,10 @@ inline core::OMRuntimeShape extendShapeSpaceToBatch(const core::OMRuntimeShape &

template <typename T>
inline OMStatus
SpaceToBatchND(const int32_t pad_value, const core::OMRuntimeShape &unextended_input1_shape,
const T *input1_data, const core::OMRuntimeShape &unextended_input2_shape,
const int32_t *block_shape_data, const core::OMRuntimeShape &unextended_input3_shape,
const int32_t *paddings_data, const core::OMRuntimeShape &unextended_output_shape,
T *output_data)
SpaceToBatchND(const core::OMRuntimeShape &unextended_input1_shape, const T *input1_data,
const core::OMRuntimeShape &unextended_input2_shape, const int32_t *block_shape_data,
const core::OMRuntimeShape &unextended_input3_shape, const int32_t *paddings_data,
const core::OMRuntimeShape &unextended_output_shape, T *output_data)
{
// Extends the input/output shape from 3D to 4D if needed, NHC -> NH1C.
const core::OMRuntimeShape input1_shape = extendShapeSpaceToBatch(unextended_input1_shape);
Expand All @@ -73,6 +72,8 @@ SpaceToBatchND(const int32_t pad_value, const core::OMRuntimeShape &unextended_i
const int padding_top = paddings_data[0];
const int padding_left = unextended_input1_shape.dimensionsCount() == 4 ? paddings_data[2] : 0;

const int32_t pad_value = 0;

for (int out_b = 0; out_b < output_batch_size; ++out_b)
{
int input_batch = out_b % input_batch_size;
Expand Down
1 change: 1 addition & 0 deletions onert-micro/onert-micro/src/execute/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ set(SOURCES
OMUtils.cpp
kernels/ConvolutionCommon.cpp
kernels/PoolingCommon.cpp
kernels/SpacesBatchesNDCommon.cpp
)

# Add configure kernels
Expand Down
94 changes: 11 additions & 83 deletions onert-micro/onert-micro/src/execute/kernels/BatchToSpaceND.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,95 +14,23 @@
* limitations under the License.
*/

#include "execute/OMUtils.h"
#include "execute/OMKernelExecutionBuilder.h"
#include "OMStatus.h"
#include "execute/OMRuntimeKernel.h"
#include "core/OMUtils.h"

#include "core/OMRuntimeShape.h"
#include "execute/kernels/SpacesBatchesNDCommon.h"
#include "PALBatchToSpaceND.h"

using namespace onert_micro;
using namespace onert_micro::execute;
namespace
{

constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t input3TensorIdx = 2;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
// Execute BatchToSpaceND (float only).
//
// The heavy lifting (reading the runtime kernel, fetching tensor data,
// dtype dispatch) lives in execute_spaces_batches_nd_common; this function
// only supplies the functor that forwards to the float PAL kernel.
//
// NOTE(review): this span was a diff rendering that interleaved the deleted
// pre-commit body with the added post-commit body; reconstructed here as the
// post-commit function.
OMStatus onert_micro::execute::execute_kernel_CircleBatchToSpaceND(
  const onert_micro::execute::OMExecuteArgs &execute_args)
{
  // Adapter matching the std::function signature expected by the common
  // driver: (input shape/data, block-shape shape/data, crops shape/data,
  // output shape/data) -> OMStatus.
  auto batch_to_space_float_lambda =
    [](const core::OMRuntimeShape &input1_shape, const float *input1_data,
       const core::OMRuntimeShape &input2_shape, const int32_t *block_shape_data,
       const core::OMRuntimeShape &input3_shape, const int32_t *crops_data,
       const core::OMRuntimeShape &output_shape, float *output_data) {
      return pal::BatchToSpaceND<float>(input1_shape, input1_data, input2_shape, block_shape_data,
                                        input3_shape, crops_data, output_shape, output_data);
    };

  return execute_spaces_batches_nd_common(execute_args, batch_to_space_float_lambda);
}
96 changes: 11 additions & 85 deletions onert-micro/onert-micro/src/execute/kernels/SpaceToBatchND.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,97 +14,23 @@
* limitations under the License.
*/

#include "execute/OMUtils.h"
#include "execute/OMKernelExecutionBuilder.h"
#include "OMStatus.h"
#include "execute/OMRuntimeKernel.h"
#include "core/OMUtils.h"

#include "core/OMRuntimeShape.h"
#include "execute/kernels/SpacesBatchesNDCommon.h"
#include "PALSpaceToBatchND.h"

using namespace onert_micro;
using namespace onert_micro::execute;
namespace
{

constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t input3TensorIdx = 2;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
// Execute SpaceToBatchND (float only).
//
// The heavy lifting (reading the runtime kernel, fetching tensor data,
// dtype dispatch) lives in execute_spaces_batches_nd_common; this function
// only supplies the functor that forwards to the float PAL kernel.
//
// Fixes vs. the committed text: the lambda and its third data parameter were
// copy-pasted from BatchToSpaceND (`batch_to_space_float_lambda`,
// `crops_data`) — renamed to reflect SpaceToBatch semantics, where the third
// input holds paddings. The leftover unused `pad_value` local is dropped
// (the PAL kernel now defines it internally).
//
// NOTE(review): this span was a diff rendering that interleaved the deleted
// pre-commit body with the added post-commit body; reconstructed here as the
// post-commit function.
OMStatus onert_micro::execute::execute_kernel_CircleSpaceToBatchND(
  const onert_micro::execute::OMExecuteArgs &execute_args)
{
  // Adapter matching the std::function signature expected by the common
  // driver: (input shape/data, block-shape shape/data, paddings shape/data,
  // output shape/data) -> OMStatus.
  auto space_to_batch_float_lambda =
    [](const core::OMRuntimeShape &input1_shape, const float *input1_data,
       const core::OMRuntimeShape &input2_shape, const int32_t *block_shape_data,
       const core::OMRuntimeShape &input3_shape, const int32_t *paddings_data,
       const core::OMRuntimeShape &output_shape, float *output_data) {
      return pal::SpaceToBatchND<float>(input1_shape, input1_data, input2_shape, block_shape_data,
                                        input3_shape, paddings_data, output_shape, output_data);
    };

  return execute_spaces_batches_nd_common(execute_args, space_to_batch_float_lambda);
}
Loading

0 comments on commit dd8f03a

Please sign in to comment.