Fix several svace defects #13320

Merged: 17 commits, Jul 3, 2024
@@ -66,7 +66,7 @@ class OMWeightOnlyFormatReader
char *buffer(const uint32_t tensor_index);

private:
char *_wof_ptr;
char *_wof_ptr = nullptr;
};

} // namespace reader
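The first hunk gives the raw `_wof_ptr` member a default value. Reading an uninitialized pointer member is undefined behavior and a classic static-analysis finding; an in-class initializer guarantees a well-defined `nullptr` until a buffer is actually attached. A minimal sketch of the pattern (the `WeightReader` class and `attach` method are simplified stand-ins, not the real `OMWeightOnlyFormatReader`):

```cpp
#include <iostream>

namespace sketch
{
class WeightReader
{
public:
  // Without the initializer, a default-constructed reader would hand out an
  // indeterminate pointer; with it, callers can safely test for null.
  char *buffer() const { return _wof_ptr; }
  void attach(char *ptr) { _wof_ptr = ptr; }

private:
  char *_wof_ptr = nullptr; // in-class initializer: defined state from construction
};
} // namespace sketch

int main()
{
  sketch::WeightReader reader;    // never attached
  if (reader.buffer() == nullptr) // well-defined check thanks to the initializer
    std::cout << "no weight buffer attached\n";
  return 0;
}
```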
@@ -103,11 +103,7 @@ class KernelCustomExecuteRegistry
}
const auto builder_id_offset = size_t(core::OMBuilderID::BuiltinOperatorsSize);
builder_id_opcode -= builder_id_offset - 1;
if (builder_id_opcode < 0)
{
*execute_func = nullptr;
return UnknownError;
}

*execute_func = _operator_execute[builder_id_opcode];
return Ok;
}
@@ -107,11 +107,7 @@ class KernelCustomConfigureRegistry
}
const auto builder_id_offset = size_t(core::OMBuilderID::BuiltinOperatorsSize);
builder_id_opcode -= builder_id_offset - 1;
if (builder_id_opcode < 0)
{
*configure_func = nullptr;
return UnknownError;
}

*configure_func = _operator_configure[builder_id_opcode];
return Ok;
}
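Both registry hunks above drop an `if (builder_id_opcode < 0)` branch. If `builder_id_opcode` has an unsigned type (it is adjusted with a `size_t` offset just above), the comparison can never be true, so the branch is dead code of the kind svace reports. A hedged sketch of why the check is a no-op for unsigned values, with a made-up offset standing in for `BuiltinOperatorsSize`:

```cpp
#include <cstddef>
#include <iostream>

int main()
{
  const std::size_t builder_id_offset = 32; // stand-in for BuiltinOperatorsSize
  std::size_t builder_id_opcode = 10;

  builder_id_opcode -= builder_id_offset - 1; // wraps around instead of going negative

  // An unsigned value is never < 0, so this branch can never execute;
  // static analyzers flag it as unreachable code.
  if (builder_id_opcode < 0)
    std::cout << "never printed\n";

  // The meaningful guard for unsigned arithmetic is a range check before
  // (or instead of) the subtraction:
  std::size_t opcode = 10;
  if (opcode < builder_id_offset - 1)
    std::cout << "opcode below the builtin range\n";
  else
    opcode -= builder_id_offset - 1;

  return 0;
}
```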
@@ -23,6 +23,7 @@
#include "core/OMKernelData.h"
#include "core/OMRuntimeShape.h"
#include <cmath>
#include <iostream>
namespace onert_micro
{
namespace execute
@@ -73,6 +74,11 @@ OMStatus L2Pool(const core::Pool2DParams &params, const core::OMRuntimeShape &in
}
}
assert(filter_count != 0);
if (filter_count == 0)
{
std::cerr << "filter_count is zero" << std::endl;
return FailedCheckCondition;
}
const float l2pool_result = std::sqrt(sum_squares / filter_count);
output_data[offset(output_shape.dimsData(), batch, out_y, out_x, channel)] =
activationFunctionWithMinMax(l2pool_result, params.activation_min,
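The L2Pool hunk backs up `assert(filter_count != 0)` with a runtime check. `assert` disappears under `NDEBUG`, so in a release build a zero `filter_count` would still reach the division; returning `FailedCheckCondition` keeps the failure visible in every build mode. A simplified sketch of the guard around the reduction (`l2PoolWindow` is a stand-in, not the onert-micro kernel; the status names follow the hunk):

```cpp
#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

enum Status { Ok, FailedCheckCondition };

// Compute the L2 value of a (possibly empty) pooling window.
Status l2PoolWindow(const std::vector<float> &window, float *out)
{
  float sum_squares = 0.f;
  int filter_count = 0;
  for (float v : window)
  {
    sum_squares += v * v;
    ++filter_count;
  }

  assert(filter_count != 0); // documents the expectation in debug builds
  if (filter_count == 0)     // still enforced when NDEBUG removes the assert
  {
    std::cerr << "filter_count is zero" << std::endl;
    return FailedCheckCondition;
  }

  *out = std::sqrt(sum_squares / filter_count);
  return Ok;
}

int main()
{
  float result = 0.f;
  if (l2PoolWindow({1.f, 2.f, 2.f}, &result) == Ok)
    std::cout << "l2 = " << result << "\n"; // prints 1.732...

  // l2PoolWindow({}, &result) would trip the assert in a debug build and
  // return FailedCheckCondition once NDEBUG is defined.
  return 0;
}
```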
4 changes: 2 additions & 2 deletions onert-micro/onert-micro/include/pal/common/PALSplit.h
@@ -56,7 +56,7 @@ OMStatus Split(const core::SplitParams &params, const core::OMRuntimeShape &inpu
}

assert(input_data != nullptr);
for (uint32_t k = 0; k < outer_size; ++k)
for (int64_t k = 0; k < outer_size; ++k)
{
for (uint32_t i = 0; i < output_count; ++i)
{
@@ -65,7 +65,7 @@ OMStatus Split(const core::SplitParams &params, const core::OMRuntimeShape &inpu
const auto copy_size = output_shape.dims(axis_value) * base_inner_size;
T *output_ptr = output_data + k * copy_size;
assert(output_ptr != nullptr);
for (uint32_t j = 0; j < copy_size; ++j)
for (int64_t j = 0; j < copy_size; ++j)
output_ptr[j] = input_data[j];
input_data += copy_size;
}
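In PALSplit.h the loop counters switch from `uint32_t` to `int64_t`. Bounds such as `outer_size` and `copy_size` are typically accumulated as 64-bit products of dimensions, so comparing them against a 32-bit unsigned counter mixes signedness and width, which svace-style checkers flag and which could misbehave for very large shapes. A small sketch of the mismatch, using hypothetical dimensions rather than the real Split kernel:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
  // 64-bit size, as produced by multiplying tensor dimensions together.
  const std::vector<int32_t> dims = {8, 128, 64};
  int64_t outer_size = 1;
  for (int32_t d : dims)
    outer_size *= d;

  // Mixing a uint32_t counter with an int64_t bound forces an implicit
  // conversion in `k < outer_size`, which analyzers report; for bounds that
  // do not fit in 32 bits the counter would wrap and never reach the limit.
  // for (uint32_t k = 0; k < outer_size; ++k) { ... }

  // Matching the counter's type to the bound removes the mixed comparison.
  int64_t odd_indices = 0;
  for (int64_t k = 0; k < outer_size; ++k)
    odd_indices += k & 1;

  std::cout << "outer_size = " << outer_size
            << ", odd indices = " << odd_indices << "\n";
  return 0;
}
```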
13 changes: 13 additions & 0 deletions onert-micro/onert-micro/include/pal/common/PALUtils.h
@@ -230,13 +230,22 @@ bool ReduceDimensionsForBroadcast(const core::OMRuntimeShape &input1_shape,
bool broadcast_input1 = false;
bool broadcast_input2 = false;
bool first_nonunit = true;

if (input1_shape.dimensionsCount() < 0 || input2_shape.dimensionsCount() < 0)
{
return false;
}
const size_t num_input1_dims = input1_shape.dimensionsCount();
const size_t num_input2_dims = input2_shape.dimensionsCount();
const int32_t *input1_dims = input1_shape.dimsData();
const int32_t *input2_dims = input2_shape.dimsData();
const size_t num_common_dims = std::min(num_input1_dims, num_input2_dims);
for (size_t i = 1; i <= num_common_dims; i++)
{
if (input1_dims[num_input1_dims - i] < 0 || input2_dims[num_input2_dims - i] < 0)
{
return false;
}
const size_t input1_dim = input1_dims[num_input1_dims - i];
const size_t input2_dim = input2_dims[num_input2_dims - i];
if (input1_dim == 0 || input2_dim == 0)
@@ -294,6 +303,8 @@ bool ReduceDimensionsForBroadcast(const core::OMRuntimeShape &input1_shape,
}
for (size_t i = 0; i < num_input1_dims - num_input2_dims; i++)
{
if (input1_dims[i] < 0)
return false;
const size_t input1_dim = input1_dims[i];
if (input1_dim == 0)
{
@@ -311,6 +322,8 @@ bool ReduceDimensionsForBroadcast(const core::OMRuntimeShape &input1_shape,
}
for (size_t i = 0; i < num_input2_dims - num_input1_dims; i++)
{
if (input2_dims[i] < 0)
return false;
const size_t input2_dim = input2_dims[i];
if (input2_dim == 0)
{
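The PALUtils.h changes reject negative dimension values before they are stored into `size_t` variables. `dimsData()` exposes signed `int32_t` dimensions, and converting a negative one to `size_t` silently wraps to a huge value, so the new early `return false` paths keep corrupt shape data out of the broadcast reduction. A stand-alone sketch of the wrap-around the guard prevents (not the real `OMRuntimeShape` API):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Returns false for shapes that carry a negative (i.e. corrupt) dimension,
// mirroring the early-out added to ReduceDimensionsForBroadcast.
bool checkDims(const int32_t *dims, int count)
{
  for (int i = 0; i < count; ++i)
  {
    if (dims[i] < 0)
      return false; // guard while the value is still signed
  }
  return true;
}

int main()
{
  const int32_t good[] = {1, 4, 8};
  const int32_t bad[] = {1, -4, 8};

  std::cout << std::boolalpha;
  std::cout << "good shape accepted: " << checkDims(good, 3) << "\n";
  std::cout << "bad shape accepted:  " << checkDims(bad, 3) << "\n";

  // Without the guard, the negative dimension would be converted like this:
  const std::size_t wrapped = static_cast<std::size_t>(bad[1]);
  std::cout << "-4 as size_t = " << wrapped << "\n"; // a huge positive number
  return 0;
}
```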
2 changes: 2 additions & 0 deletions onert-micro/onert-micro/include/pal/mcu/PALAddN.h
@@ -31,13 +31,15 @@ OMStatus AddN<int8_t>(const size_t flat_size, const size_t num_inputs,
const int8_t *const *input_data, int8_t *output_data)
{
assert(false && "Not IMPL yet");
return UnsupportedOp;
}

template <>
OMStatus AddN<int16_t>(const size_t flat_size, const size_t num_inputs,
const int16_t *const *input_data, int16_t *output_data)
{
assert(false && "Not IMPL yet");
return UnsupportedOp;
}

} // namespace pal
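The `AddN` specializations for `int8_t` and `int16_t` previously ended after `assert(false && ...)` without returning a value. With `NDEBUG` the assert vanishes, and flowing off the end of a non-void function is undefined behavior, exactly the kind of defect svace reports; returning `UnsupportedOp` gives release builds a defined error path. A sketch of the pattern, assuming an `OMStatus`-like enum (the generic float path is only illustrative):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

enum OMStatus { Ok, UnsupportedOp };

template <typename T>
OMStatus AddN(std::size_t flat_size, std::size_t num_inputs,
              const T *const *input_data, T *output_data)
{
  for (std::size_t i = 0; i < flat_size; ++i)
  {
    T sum = 0;
    for (std::size_t n = 0; n < num_inputs; ++n)
      sum += input_data[n][i];
    output_data[i] = sum;
  }
  return Ok;
}

// Unimplemented specialization: the assert documents the gap in debug builds,
// and the return keeps release builds (NDEBUG) well defined.
template <>
OMStatus AddN<int8_t>(std::size_t, std::size_t, const int8_t *const *, int8_t *)
{
  assert(false && "Not IMPL yet");
  return UnsupportedOp;
}

int main()
{
  const float a[] = {1.f, 2.f};
  const float b[] = {3.f, 4.f};
  const float *inputs[] = {a, b};
  float out[2] = {};
  if (AddN<float>(2, 2, inputs, out) == Ok)
    std::cout << out[0] << " " << out[1] << "\n"; // 4 6
  return 0;
}
```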
6 changes: 2 additions & 4 deletions onert-micro/onert-micro/src/core/OMRuntimeModule.cpp
@@ -100,10 +100,8 @@ OMStatus OMRuntimeModule::importModel(const char *model_ptr, const OMConfig &con
return status;

// 4 - AllocDeallocPlan creation
status = import::OMExecutionPlanCreator::createExecutionPlan(runtime_storage, runtime_context,
runtime_allocator, config);
if (status != Ok)
return status;
import::OMExecutionPlanCreator::createExecutionPlan(runtime_storage, runtime_context,
runtime_allocator, config);
}
for (uint32_t i = 0; i < num_subgraph; ++i)
{
3 changes: 0 additions & 3 deletions onert-micro/onert-micro/src/core/memory/OMMemoryManager.cpp
@@ -27,9 +27,6 @@ OMStatus OMMemoryManager::allocateMemory(uint32_t size, uint8_t **data)

*data = data_tmp;

if (*data == nullptr)
return UnknownError;

return Ok;
}

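In `OMMemoryManager::allocateMemory`, the null check on `*data` right after `*data = data_tmp;` is dropped. Assuming `data_tmp` is already validated at the point of allocation (that earlier check sits outside this hunk), the second check can never fire and gets reported as dead code. A hedged sketch of the shape of this cleanup, with a malloc-based stand-in rather than the real memory manager:

```cpp
#include <cstdint>
#include <cstdlib>
#include <iostream>

enum OMStatus { Ok, UnknownError };

OMStatus allocateMemory(uint32_t size, uint8_t **data)
{
  uint8_t *data_tmp = static_cast<uint8_t *>(std::malloc(size));
  if (data_tmp == nullptr) // single point of failure handling
    return UnknownError;

  *data = data_tmp;
  // A second `if (*data == nullptr)` here could never be true, because *data
  // was just assigned a pointer already known to be non-null.
  return Ok;
}

int main()
{
  uint8_t *buffer = nullptr;
  if (allocateMemory(64, &buffer) == Ok)
  {
    std::cout << "allocated 64 bytes\n";
    std::free(buffer);
  }
  return 0;
}
```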
15 changes: 8 additions & 7 deletions onert-micro/onert-micro/src/core/memory/OMRuntimeAllocator.cpp
@@ -18,6 +18,7 @@
#include "core/memory/OMMemoryManager.h"

#include "core/OMDataType.h"
#include <limits>

using namespace onert_micro::core::memory;
using namespace onert_micro;
@@ -66,7 +67,10 @@ OMStatus OMRuntimeAllocator::allocate(size_t kernel_index, OMRuntimeContext *con
const auto casted_num_elements = static_cast<uint32_t>(num_elements);
const auto type_size =
static_cast<uint32_t>(getOMDataTypeSize(onertMicroDatatype(tensor->type())));

if (casted_num_elements > std::numeric_limits<uint32_t>::max() / type_size)
{
return FailedCheckCondition;
}
// allocate data
uint8_t *allocated_data = nullptr;
assert(storage->getDataByTensorIndex(&allocated_data, tensor_index) == Ok &&
@@ -94,15 +98,14 @@ OMStatus OMRuntimeAllocator::deallocate(size_t kernel_index, OMRuntimeStorage *s
{
uint8_t *allocated_data = nullptr;
OMStatus status = storage->getDataByTensorIndex(&allocated_data, tensor_index);
assert(status == Ok); // note that status always 0

// To continue deallocate due to current tensor is not saved in storage
if (allocated_data == nullptr)
continue;
if (status != Ok)
return status;

status = OMMemoryManager::deallocateMemory(allocated_data);
if (status != Ok)
return status;
assert(status == Ok); // note that status always 0

status = storage->removeTensorFromTensorIndexToData(tensor_index);
if (status != Ok)
@@ -135,8 +138,6 @@ OMStatus OMRuntimeAllocator::allocateGraphInputs(OMRuntimeContext *context,
uint8_t *allocated_data = nullptr;
// First clear if already allocated
status = storage->getDataByTensorIndex(&allocated_data, tensor_index);
if (status != Ok)
return status;

OMMemoryManager::deallocateMemory(allocated_data);

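The allocator change guards the `casted_num_elements * type_size` multiplication: both operands are 32-bit, so the product can wrap and a later allocation would silently be too small. Checking one factor against `std::numeric_limits<uint32_t>::max()` divided by the other detects the wrap before it can happen, which is why `<limits>` is now included. A small sketch of this overflow guard, independent of the allocator itself (`checkedByteCount` is a made-up helper):

```cpp
#include <cstdint>
#include <iostream>
#include <limits>

// Returns true and writes the product if num_elements * type_size fits in
// uint32_t; returns false instead of letting the multiplication wrap.
bool checkedByteCount(uint32_t num_elements, uint32_t type_size, uint32_t *out_bytes)
{
  if (type_size != 0 &&
      num_elements > std::numeric_limits<uint32_t>::max() / type_size)
    return false; // product would overflow 32 bits

  *out_bytes = num_elements * type_size;
  return true;
}

int main()
{
  uint32_t bytes = 0;

  if (checkedByteCount(1024, 4, &bytes))
    std::cout << "buffer size: " << bytes << " bytes\n"; // 4096

  if (!checkedByteCount(0x80000000u, 4, &bytes))
    std::cout << "rejected: element count * type size overflows uint32_t\n";

  return 0;
}
```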
4 changes: 0 additions & 4 deletions onert-micro/onert-micro/src/core/train/OMTrainingHandler.cpp
@@ -54,8 +54,6 @@ OMStatus OMTrainingHandler::handleError(const OMConfig &config, OMRuntimeStorage
// Get calculated data
uint8_t *calculated_data = nullptr;
OMStatus status = forward_storage.getDataByTensorIndex(&calculated_data, forward_output_index);
if (status != Ok)
return status;
assert(calculated_data != nullptr);

// Get target data
@@ -221,8 +219,6 @@ OMStatus OMTrainingHandler::evaluateMetric(OMMetrics metric, void *metric_val,
// Get calculated data
uint8_t *calculated_data = nullptr;
OMStatus status = storage.getDataByTensorIndex(&calculated_data, forward_output_index);
if (status != Ok)
return status;
assert(calculated_data != nullptr);

// Get target data
3 changes: 0 additions & 3 deletions onert-micro/onert-micro/src/execute/OMRuntimeKernel.cpp
@@ -87,9 +87,6 @@ OMStatus onert_micro::execute::OMRuntimeKernel::getDataFromStorage(uint16_t op_i
continue;
status = storage.getDataByTensorIndex(&outputs_data[i], outputs_index[i]);

if (status != Ok)
return status;

if (storage.getKernelType(op_index) == core::Inplace)
{
outputs_data[i] = inputs_data[i];
2 changes: 1 addition & 1 deletion onert-micro/onert-micro/src/execute/kernels/Split.cpp
@@ -90,7 +90,7 @@ OMStatus onert_micro::execute::execute_kernel_CircleSplit(const OMExecuteArgs &e
OMRuntimeShape input_shape(input);
OMRuntimeShape output_shape(output);

int32_t axis_value = axis_data[0];
int32_t axis_value = utils::castInputData<int32_t>(axis_data)[0];
if (axis_value < 0)
{
axis_value += input_shape.dimensionsCount() + 1;
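In Split.cpp the axis is now read through `utils::castInputData<int32_t>(axis_data)` instead of indexing `axis_data` directly. If `axis_data` is exposed as a raw byte pointer, `axis_data[0]` reads only the first byte rather than the whole 32-bit axis value; routing the access through a typed cast helper reads the value at its real width. A sketch of the difference, with a hypothetical `castInputData` stand-in (the real helper lives in the onert-micro utils and may differ):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Hypothetical stand-in for utils::castInputData<T>: reinterpret a raw
// tensor byte buffer as a typed pointer (mirrors the cast-helper idiom).
template <typename T> const T *castInputData(const uint8_t *data)
{
  return reinterpret_cast<const T *>(data);
}

int main()
{
  // Four bytes holding the int32_t axis value -1 ("last axis").
  const int32_t stored_axis = -1;
  alignas(int32_t) uint8_t raw[sizeof(int32_t)];
  std::memcpy(raw, &stored_axis, sizeof(raw));

  // Byte-wise read: looks only at raw[0], i.e. 0xFF == 255, not the axis.
  const int32_t byte_read = raw[0];

  // Typed read: interprets all four bytes as a single int32_t, giving -1.
  const int32_t typed_read = castInputData<int32_t>(raw)[0];

  std::cout << "byte-wise: " << byte_read << ", typed: " << typed_read << "\n";
  return 0;
}
```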
@@ -61,6 +61,7 @@ OMStatus isInplaceOperation(const circle::Operator *op, core::OMRuntimeContext &
is_inplace = true;
break;
}
#if 0 // FIXME: Enable after custom operation is introduced
case circle::BuiltinOperator_CUSTOM:
{
core::OMBuilderCustomID custom_id;
@@ -83,10 +84,13 @@ OMStatus isInplaceOperation(const circle::Operator *op, core::OMRuntimeContext &
break;
default:
is_inplace = false;
break;
}
}
#endif
default:
is_inplace = false;
break;
}
return status;
}
@@ -212,9 +216,7 @@ OMStatus findInplaceOp(core::OMRuntimeStorage &storage, core::OMRuntimeContext &
auto cur_op = operators->operator[](i);

bool is_inplace = false;
status = isInplaceOperation(cur_op, context, is_inplace);
if (status != Ok)
return status;
isInplaceOperation(cur_op, context, is_inplace);

if (is_inplace == false)
continue;
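The last file disables the `BuiltinOperator_CUSTOM` branch of `isInplaceOperation` with `#if 0` until custom operations are introduced, and adds a plain `default:` so the switch still leaves `is_inplace` well defined; the caller in `findInplaceOp` then drops its status check, since the remaining branches only set a flag. A sketch of compiling out one switch case while keeping the default behaviour, using a simplified operator enum rather than the circle schema (`lookUpCustomOperator` is a hypothetical helper inside the disabled block):

```cpp
#include <iostream>

enum class Op { Add, Custom, Mul };

// Decide whether an operator may run in-place. The Custom branch is compiled
// out until custom operators are supported; the default keeps the result
// well defined for every other case, including the disabled one.
bool isInplaceCandidate(Op op)
{
  bool is_inplace = false;
  switch (op)
  {
    case Op::Add:
      is_inplace = true;
      break;
#if 0 // FIXME: enable once custom operators are introduced
    case Op::Custom:
      is_inplace = lookUpCustomOperator(op); // hypothetical helper
      break;
#endif
    default:
      is_inplace = false;
      break;
  }
  return is_inplace;
}

int main()
{
  std::cout << std::boolalpha;
  std::cout << "Add in-place:    " << isInplaceCandidate(Op::Add) << "\n";    // true
  std::cout << "Custom in-place: " << isInplaceCandidate(Op::Custom) << "\n"; // handled by default: false
  return 0;
}
```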