Draft: [onert] Remove tensor layout method #13797

Closed · wants to merge 2 commits
2 changes: 0 additions & 2 deletions runtime/onert/backend/acl_common/IACLTensor.cc
@@ -41,8 +41,6 @@ size_t IACLTensor::calcOffset(const ir::Coordinates &coords) const
   return info()->offset_element_in_bytes(acl_coords);
 }
 
-ir::Layout IACLTensor::layout() const { return acl_common::asRuntimeLayout(info()->data_layout()); }
-
 ir::DataType IACLTensor::data_type() const
 {
   return acl_common::asRuntimeDataType(info()->data_type());
1 change: 0 additions & 1 deletion runtime/onert/backend/acl_common/IACLTensor.h
@@ -49,7 +49,6 @@ class IACLTensor : public ITensor
   uint8_t *buffer() const final { return handle()->buffer(); }
   size_t total_size() const final { return info()->total_size(); }
   size_t calcOffset(const ir::Coordinates &coords) const final;
-  ir::Layout layout() const final;
   ir::DataType data_type() const final;
   float data_scale() const override;
   int32_t data_zero_point() const override;
6 changes: 0 additions & 6 deletions runtime/onert/backend/cpu/KernelGenerator.cc
@@ -260,12 +260,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
 
   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
1 change: 0 additions & 1 deletion runtime/onert/backend/cpu/ops/OperationUtils.cc
@@ -286,7 +286,6 @@ std::vector<int32_t> getReducerAxes(const IPortableTensor *axes)
   std::vector<int32_t> ret;
 
   auto axes_vals = (axes->getShape().rank() == 0) ? 1 : axes->getShape().dim(0);
-  assert(axes->layout() == ir::Layout::NHWC);
   assert(static_cast<size_t>(axes_vals) == axes->getShape().num_elements());
   switch (axes->data_type())
   {
3 changes: 0 additions & 3 deletions runtime/onert/backend/cpu/ops/OperationUtils.h
@@ -97,9 +97,6 @@ inline nnfw::cker::Shape getShape(const IPortableTensor *tensor)
     return nnfw::cker::Shape();
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
6 changes: 0 additions & 6 deletions runtime/onert/backend/ruy/KernelGenerator.cc
@@ -57,12 +57,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
 
   for (const auto &ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
3 changes: 0 additions & 3 deletions runtime/onert/backend/ruy/ops/OperationUtils.h
@@ -45,9 +45,6 @@ inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
     return nnfw::ruy::Shape();
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::ruy::Shape ret(rank);
   auto data = ret.DimsData();
5 changes: 0 additions & 5 deletions runtime/onert/backend/train/KernelGenerator.cc
@@ -133,11 +133,6 @@ std::unique_ptr<exec::train::TrainableFnSequence> KernelGenerator::generate(ir::
 
   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
     auto tensor = _tensor_reg->getNonConstTensor(ind);
     if (tensor)
     {
3 changes: 0 additions & 3 deletions runtime/onert/backend/train/ops/OperationUtils.cc
@@ -37,9 +37,6 @@ nnfw::cker::Shape getShape(const IPortableTensor *tensor)
   assert(!tensor->is_dynamic() && "Dynamic tensor is not supported yet");
 
   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
4 changes: 0 additions & 4 deletions runtime/onert/backend/train/ops/PoolLayer.cc
@@ -77,8 +77,6 @@ class MaxPool2D final : public TrainingKernelRegistry
 public:
   void forward(const IPortableTensor *in, IPortableTensor *out)
   {
-    assert(in->layout() == ir::Layout::NHWC);
-
     auto out_shape = getShape(out);
     auto out_data = getBuffer<float>(out);
     auto arg_max_index = _arg_max_index.get();
@@ -90,8 +88,6 @@ class MaxPool2D final : public TrainingKernelRegistry
 
   void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
   {
-    assert(back_prop_out->layout() == ir::Layout::NHWC);
-
     // activation backward
     try
     {
4 changes: 0 additions & 4 deletions runtime/onert/backend/train/optimizer/Optimizers.test.cc
@@ -54,8 +54,6 @@ class MockUpTensor : public IPortableTensor
 
   template <typename T> const std::vector<T> &data() const { return _data; }
 
-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 private:
   using ITensor::setShape;
   using ITensor::set_dynamic;
@@ -89,8 +87,6 @@ class MockUpTrainableTensor : public backend::train::ITrainableTensor
     return const_cast<uint8_t *>(_data.data());
   }
 
-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 public:
   std::vector<ITensor *> optVars() override
   {
13 changes: 0 additions & 13 deletions runtime/onert/backend/trix/Convert.cc
@@ -23,19 +23,6 @@ namespace backend
 namespace trix
 {
 
-data_layout convertDataLayout(const ir::Layout layout)
-{
-  switch (layout)
-  {
-    case ir::Layout::NCHW:
-      return DATA_LAYOUT_NCHW;
-    case ir::Layout::NHWC:
-      return DATA_LAYOUT_NHWC;
-    default:
-      throw std::runtime_error("Unknown Layout");
-  }
-}
-
 data_type convertDataType(const ir::DataType type)
 {
   switch (type)
11 changes: 1 addition & 10 deletions runtime/onert/backend/trix/Convert.h
@@ -19,7 +19,6 @@
 
 #include <backend/IPortableTensor.h>
 #include <ir/DataType.h>
-#include <ir/Layout.h>
 
 #include <libnpuhost.h>
 #include <type_traits>
@@ -31,14 +30,6 @@ namespace backend
 namespace trix
 {
 
-/**
- * @brief Convert type of layout from onert type to npu type
- *
- * @param layout Layout type in onert
- * @return data_layout Layout type in npu
- */
-data_layout convertDataLayout(const ir::Layout layout);
-
 /**
  * @brief Convert type of data from onert type to npu type
  *
@@ -61,7 +52,7 @@ void setDataInfo(const std::vector<T *> &tensors, tensors_data_info *info)
 
   for (uint32_t idx = 0; idx < info->num_info; ++idx)
   {
-    info->info[idx].layout = convertDataLayout(tensors[idx]->layout());
+    info->info[idx].layout = DATA_LAYOUT_NHWC;
    info->info[idx].type = convertDataType(tensors[idx]->data_type());
   }
 }
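Review note: with portable tensors guaranteed NHWC, the per-tensor layout conversion above collapses to a constant, which is what lets convertDataLayout and the <ir/Layout.h> include disappear. A minimal sketch of the invariant a caller can now rely on (illustrative only; the helper and include paths are hypothetical, while tensors_data_info and DATA_LAYOUT_NHWC come from libnpuhost):

#include <cassert>
#include <vector>

#include <backend/IPortableTensor.h>
#include <libnpuhost.h>

#include "Convert.h" // trix backend-local header, as in this PR

using onert::backend::IPortableTensor;

// Hypothetical check: after this PR, setDataInfo reports NHWC for every
// tensor unconditionally instead of consulting tensor->layout().
void checkAllNhwc(const std::vector<IPortableTensor *> &tensors)
{
  tensors_data_info info;
  onert::backend::trix::setDataInfo(tensors, &info);
  for (uint32_t idx = 0; idx < info.num_info; ++idx)
    assert(info.info[idx].layout == DATA_LAYOUT_NHWC);
}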
1 change: 0 additions & 1 deletion runtime/onert/core/include/backend/ITensor.h
@@ -42,7 +42,6 @@ class ITensor
   virtual uint8_t *buffer() const = 0;
   virtual size_t total_size() const = 0;
   virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
-  virtual ir::Layout layout() const = 0;
   virtual ir::DataType data_type() const = 0;
   virtual float data_scale() const = 0;
   virtual int32_t data_zero_point() const = 0;
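Review note: this hunk is the heart of the PR. Dropping the pure virtual means layout is no longer a queryable property of every tensor; NHWC becomes the implicit contract, and the few classes that still need a layout keep a non-virtual, class-local accessor (see the IOTensor and UserTensor hunks below). A before/after sketch of the interface, with unrelated members elided (ITensorBefore/ITensorAfter are illustrative names, not real classes):

#include <ir/DataType.h>
#include <ir/Layout.h>

// Before this PR: every implementation had to answer a layout query.
struct ITensorBefore
{
  virtual ~ITensorBefore() = default;
  virtual onert::ir::Layout layout() const = 0; // the method this PR removes
  virtual onert::ir::DataType data_type() const = 0;
};

// After: the interface is layout-free; explicit ir::PermuteType values
// describe the remaining permute cases instead.
struct ITensorAfter
{
  virtual ~ITensorAfter() = default;
  virtual onert::ir::DataType data_type() const = 0;
};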
6 changes: 2 additions & 4 deletions runtime/onert/core/include/backend/basic/Tensor.h
@@ -41,8 +41,8 @@ class Tensor : public IPortableTensor
 
 public:
   Tensor(const ir::OperandInfo &info, DynamicMemoryManager *dynamic_mem_mgr)
-    : IPortableTensor(info), _layout(ir::Layout::NHWC), _buffer(nullptr), _size(info.total_size()),
-      _num_references(0), _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
+    : IPortableTensor(info), _buffer(nullptr), _size(info.total_size()), _num_references(0),
+      _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
   {
     // DO NOTHING
   }
@@ -71,7 +71,6 @@ class Tensor : public IPortableTensor
 
 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   bool applyShape(const ir::Shape &new_shape) override;
 
@@ -126,7 +125,6 @@ class Tensor : public IPortableTensor
   void setShape(const ir::Shape &new_shape) override;
 
 protected:
-  const ir::Layout _layout;
   uint8_t *_buffer;
   size_t _size;
   int32_t _num_references;
@@ -51,7 +51,6 @@ class TrainableTensor : public backend::train::ITrainableTensor
 
 public:
   uint8_t *buffer() const override { return _tensor.buffer(); }
-  ir::Layout layout() const override { return _tensor.layout(); }
 
 public:
   std::vector<ITensor *> optVars() override;
3 changes: 1 addition & 2 deletions runtime/onert/core/include/ir/Coordinates.h
@@ -119,8 +119,7 @@ class Coordinates final
   std::vector<int32_t> _coordinates;
 };
 
-Coordinates convertCoordinates(const Coordinates &from_coordinates, Layout from_layout,
-                               Layout to_layout);
+Coordinates convertCoordinates(const Coordinates &from_coordinates, const PermuteType &type);
 
 } // namespace ir
 } // namespace onert
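The signature change mirrors the rest of the PR: call sites name the permutation directly instead of passing a layout pair. A migration sketch (ir::PermuteType::COPY appears in the PermuteLayer diff below; the NHWC_TO_NCHW enumerator name is an assumption):

#include <ir/Coordinates.h>

// Migration sketch; the NHWC_TO_NCHW enumerator name is assumed.
void migrateCallSite(const onert::ir::Coordinates &coords)
{
  using namespace onert;

  // Before: the permutation was derived from two layouts at each call site.
  // auto out = ir::convertCoordinates(coords, ir::Layout::NHWC, ir::Layout::NCHW);

  // After: the permutation is a single explicit argument.
  auto out = ir::convertCoordinates(coords, ir::PermuteType::NHWC_TO_NCHW);
  (void)out;
}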
8 changes: 7 additions & 1 deletion runtime/onert/core/include/ir/Shape.h
@@ -137,7 +137,13 @@ struct Shape
 inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); }
 inline bool operator!=(const Shape &lhs, const Shape &rhs) { return lhs.dims() != rhs.dims(); }
 
-Shape permuteShape(const Shape &shape, Layout frontend_layout, Layout backend_layout);
+/**
+ * @brief Converts shape when its rank is 4
+ *
+ * @return Return a shape based on permutation type.
+ *         If rank is not 4, input shape is returned without conversion.
+ */
+ir::Shape convertShape(const Shape &shape, const PermuteType &type);
 
 /**
  * @brief Find out if tha rank in this shape is "maybe" unspecified.
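The new doc comment pins the contract down: only rank-4 shapes are permuted; anything else passes through untouched. A behavior sketch under that contract (values illustrative; the NHWC_TO_NCHW enumerator name is assumed, as above):

#include <ir/Shape.h>

// Behavior sketch under the documented rank-4 contract.
void convertShapeExamples()
{
  onert::ir::Shape nhwc{1, 224, 224, 3};
  auto nchw = onert::ir::convertShape(nhwc, onert::ir::PermuteType::NHWC_TO_NCHW);
  // expected: {1, 3, 224, 224}

  onert::ir::Shape vec{10};
  auto same = onert::ir::convertShape(vec, onert::ir::PermuteType::NHWC_TO_NCHW);
  // rank != 4, so the shape is returned unchanged: {10}
}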
1 change: 0 additions & 1 deletion runtime/onert/core/src/backend/builtin/IOTensor.cc
@@ -40,7 +40,6 @@ void IOTensor::setTensor(IPortableTensor *tensor)
 {
   assert(tensor);
   assert(tensor != this);
-  assert(tensor->layout() == _orig->layout()); // Changing layout is not considered yet
   _tensor = tensor;
   if (_info.shape() != tensor->getShape())
   {
2 changes: 1 addition & 1 deletion runtime/onert/core/src/backend/builtin/IOTensor.h
@@ -57,7 +57,7 @@ class IOTensor : public IPortableTensor
 
 public:
   uint8_t *buffer() const override { return _tensor->buffer(); }
-  ir::Layout layout() const override { return _orig->layout(); }
+  ir::Layout layout() const { return _orig->layout(); }
   void set_dynamic() override
   {
     _info.setDynamic();
2 changes: 1 addition & 1 deletion runtime/onert/core/src/backend/builtin/UserTensor.h
@@ -45,7 +45,7 @@ class UserTensor : public IPortableTensor
 
 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
   bool applyShape(const ir::Shape &) override;
25 changes: 12 additions & 13 deletions runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc
@@ -16,8 +16,6 @@
 
 #include "PermuteLayer.h"
 
-#include "../../../exec/ShapeConverter.h"
-
 #include <ruy/context.h> // from @ruy
 
 namespace onert
@@ -110,7 +108,7 @@ void PermuteLayer::optimize()
           const auto copy_len = loop_shape.dim(copy_axis) * data_size;
           loop_shape.dim(copy_axis) = 1;
 
-          appendPermuteTasks(src, dst, loop_shape, copy_len);
+          appendPermuteTasks(src, dst, loop_shape, copy_len, permute_type);
         }
       }
      else
@@ -121,7 +119,7 @@ void PermuteLayer::optimize()
         const auto loop_shape = src_tensor.getShape();
         const auto copy_len = data_size;
 
-        appendPermuteTasks(src, dst, loop_shape, copy_len);
+        appendPermuteTasks(src, dst, loop_shape, copy_len, permute_type);
       }
     });
   };
@@ -136,11 +134,12 @@ void PermuteLayer::optimize()
 }
 
 void PermuteLayer::appendPermuteTasks(const ITensor *src_tensor, ITensor *dst_tensor,
-                                      const ir::Shape &loop_shape, size_t size)
+                                      const ir::Shape &loop_shape, size_t size,
+                                      const ir::PermuteType &permute_type)
 {
   size_t distributed_dim = 0;
   auto src_shape = src_tensor->getShape();
-  if (src_tensor->layout() == dst_tensor->layout())
+  if (permute_type == ir::PermuteType::COPY)
   {
     for (int i = 1; i < src_shape.rank() - 1; ++i)
     {
@@ -165,7 +164,8 @@ void PermuteLayer::appendPermuteTasks(const ITensor *src_tensor, ITensor *dst_te
     start_coords.set(distributed_dim, start);
     int end = start + (distributed_dim_val - start) / (thread_count - i);
     one_thread_loop_shape.dim(distributed_dim) = end - start;
-    tasks.emplace_back(*src_tensor, *dst_tensor, start_coords, one_thread_loop_shape, size);
+    tasks.emplace_back(*src_tensor, *dst_tensor, permute_type, start_coords, one_thread_loop_shape,
+                       size);
     start = end;
   }
   assert(tasks.size() >= 1);
@@ -201,14 +201,14 @@ void PermuteLayer::run()
   {
     auto dst_tensor = _dst_tensors.at(i);
     auto src_tensor = _src_tensors.at(i);
+    auto permute_type = _permute_types.at(i);
     if (src_tensor->is_dynamic() || dst_tensor->is_dynamic())
     {
       // getting output shape
       auto src_shape = src_tensor->getShape();
 
       // set output shape and output buffer
-      ir::Shape new_shape =
-        exec::convertShape(src_shape, src_tensor->layout(), dst_tensor->layout());
+      ir::Shape new_shape = ir::convertShape(src_shape, permute_type);
 
       try
       {
@@ -225,8 +225,7 @@ void PermuteLayer::run()
         throw;
       }
     }
-    assert(exec::convertShape(src_tensor->getShape(), src_tensor->layout(), dst_tensor->layout()) ==
-           dst_tensor->getShape());
+    assert(ir::convertShape(src_tensor->getShape(), permute_type) == dst_tensor->getShape());
   }
   assert(_src_tensors.size() == _dst_tensors.size());
   assert(_src_tensors.size() == _src_tensors_offsets.size());
@@ -266,7 +265,7 @@ void PermuteLayer::run()
     // If dst is subtensor, we have to use clEnqueueMapBuffer instead of clEnqueueWirteBuffer
     else if (dst->needMemoryMap() && !dst->is_subtensor())
     {
-      if (!src->has_padding() && !dst->has_padding() && src->layout() == dst->layout())
+      if (!src->has_padding() && !dst->has_padding() && permute_type == ir::PermuteType::COPY)
       {
         // This is more effective than multi-threading
         src->access([&](backend::ITensor &) { dst->enqueueWriteBuffer(src->buffer(), false); });
@@ -282,7 +281,7 @@ void PermuteLayer::run()
       }
     }
     else if (src->needMemoryMap() && !src->is_subtensor() && !src->has_padding() &&
-             !dst->has_padding() && src->layout() == dst->layout())
+             !dst->has_padding() && permute_type == ir::PermuteType::COPY)
     {
       // This is more effective than multi-threading
       assert(!dst->needMemoryMap());
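Net effect in this file: every layout-pair comparison becomes a permute_type == ir::PermuteType::COPY test, and the permute type is threaded through appendPermuteTasks into each task. A condensed sketch of the resulting dispatch (helper names are hypothetical, not part of this PR):

#include <backend/ITensor.h>
#include <ir/Layout.h> // assumed home of ir::PermuteType

using onert::backend::ITensor;
using onert::ir::PermuteType;

// Hypothetical helpers standing in for the task machinery in this file.
void copyContiguous(ITensor *src, ITensor *dst);
void copyElementwise(ITensor *src, ITensor *dst, const PermuteType &type);

// Condensed, hypothetical view of the post-PR dispatch in PermuteLayer.
void runPermute(ITensor *src, ITensor *dst, const PermuteType &permute_type)
{
  if (permute_type == PermuteType::COPY && !src->has_padding() && !dst->has_padding())
  {
    // Same memory order on both sides: one contiguous buffer copy suffices.
    copyContiguous(src, dst);
  }
  else
  {
    // NHWC<->NCHW (or padded tensors): element-wise copy that remaps each
    // coordinate via ir::convertCoordinates(coords, permute_type).
    copyElementwise(src, dst, permute_type);
  }
}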