[onert] Remove layout in ITensor #13839

Merged (1 commit) on Aug 29, 2024

This PR removes layout() from backend::ITensor and its implementations. The runtime now assumes the canonical NHWC layout throughout the backends; only the builtin I/O tensors keep a non-virtual layout() accessor for the user-specified layout, and the permute kernels decide between plain copy and layout conversion via ir::PermuteType instead of comparing tensor layouts.

Changes from all commits:
2 changes: 0 additions & 2 deletions runtime/onert/backend/acl_common/IACLTensor.cc

@@ -41,8 +41,6 @@ size_t IACLTensor::calcOffset(const ir::Coordinates &coords) const
   return info()->offset_element_in_bytes(acl_coords);
 }

-ir::Layout IACLTensor::layout() const { return acl_common::asRuntimeLayout(info()->data_layout()); }
-
 ir::DataType IACLTensor::data_type() const
 {
   return acl_common::asRuntimeDataType(info()->data_type());
1 change: 0 additions & 1 deletion runtime/onert/backend/acl_common/IACLTensor.h

@@ -49,7 +49,6 @@ class IACLTensor : public ITensor
   uint8_t *buffer() const final { return handle()->buffer(); }
   size_t total_size() const final { return info()->total_size(); }
   size_t calcOffset(const ir::Coordinates &coords) const final;
-  ir::Layout layout() const final;
   ir::DataType data_type() const final;
   float data_scale() const override;
   int32_t data_zero_point() const override;
6 changes: 0 additions & 6 deletions runtime/onert/backend/cpu/KernelGenerator.cc

@@ -260,12 +260,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI

   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
1 change: 0 additions & 1 deletion runtime/onert/backend/cpu/ops/OperationUtils.cc

@@ -286,7 +286,6 @@ std::vector<int32_t> getReducerAxes(const IPortableTensor *axes)
   std::vector<int32_t> ret;

   auto axes_vals = (axes->getShape().rank() == 0) ? 1 : axes->getShape().dim(0);
-  assert(axes->layout() == ir::Layout::NHWC);
   assert(static_cast<size_t>(axes_vals) == axes->getShape().num_elements());
   switch (axes->data_type())
   {
3 changes: 0 additions & 3 deletions runtime/onert/backend/cpu/ops/OperationUtils.h

@@ -97,9 +97,6 @@ inline nnfw::cker::Shape getShape(const IPortableTensor *tensor)
     return nnfw::cker::Shape();

   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
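Note: after this change, getShape() no longer checks the tensor layout; the conversion is layout-agnostic by construction, since dimensions are copied positionally. A minimal self-contained sketch of the remaining logic (IrShape is a stand-in for onert's ir::Shape, assumed to expose rank()/dim(i)):

#include <cstdint>
#include <vector>

struct IrShape
{
  std::vector<int32_t> dims;
  int rank() const { return static_cast<int>(dims.size()); }
  int32_t dim(int i) const { return dims[i]; }
};

// Copy dims positionally, as the kernel-side shape conversion does; no layout
// is consulted, so NHWC vs NCHW is now entirely the caller's concern.
std::vector<int32_t> toRawDims(const IrShape &shape)
{
  std::vector<int32_t> raw(shape.rank());
  for (int i = 0; i < shape.rank(); ++i)
    raw[i] = shape.dim(i);
  return raw;
}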
6 changes: 0 additions & 6 deletions runtime/onert/backend/ruy/KernelGenerator.cc

@@ -57,12 +57,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI

   for (const auto &ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
3 changes: 0 additions & 3 deletions runtime/onert/backend/ruy/ops/OperationUtils.h

@@ -45,9 +45,6 @@ inline nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
     return nnfw::ruy::Shape();

   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::ruy::Shape ret(rank);
   auto data = ret.DimsData();
5 changes: 0 additions & 5 deletions runtime/onert/backend/train/KernelGenerator.cc

@@ -133,11 +133,6 @@ std::unique_ptr<exec::train::TrainableFnSequence> KernelGenerator::generate(ir::

   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
     auto tensor = _tensor_reg->getNonConstTensor(ind);
     if (tensor)
     {
3 changes: 0 additions & 3 deletions runtime/onert/backend/train/ops/OperationUtils.cc

@@ -37,9 +37,6 @@ nnfw::cker::Shape getShape(const IPortableTensor *tensor)
   assert(!tensor->is_dynamic() && "Dynamic tensor is not supported yet");

   const ir::Shape &shape = tensor->get_info().shape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
4 changes: 0 additions & 4 deletions runtime/onert/backend/train/ops/PoolLayer.cc

@@ -77,8 +77,6 @@ class MaxPool2D final : public TrainingKernelRegistry
 public:
   void forward(const IPortableTensor *in, IPortableTensor *out)
   {
-    assert(in->layout() == ir::Layout::NHWC);
-
     auto out_shape = getShape(out);
     auto out_data = getBuffer<float>(out);
     auto arg_max_index = _arg_max_index.get();
@@ -90,8 +88,6 @@ class MaxPool2D final : public TrainingKernelRegistry

   void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
   {
-    assert(back_prop_out->layout() == ir::Layout::NHWC);
-
     // activation backward
     try
     {
4 changes: 0 additions & 4 deletions runtime/onert/backend/train/optimizer/Optimizers.test.cc

@@ -54,8 +54,6 @@ class MockUpTensor : public IPortableTensor

   template <typename T> const std::vector<T> &data() const { return _data; }

-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 private:
   using ITensor::setShape;
   using ITensor::set_dynamic;
@@ -89,8 +87,6 @@ class MockUpTrainableTensor : public backend::train::ITrainableTensor
     return const_cast<uint8_t *>(_data.data());
   }

-  ir::Layout layout() const override { return ir::Layout::NHWC; }
-
 public:
   std::vector<ITensor *> optVars() override
   {
2 changes: 1 addition & 1 deletion runtime/onert/backend/trix/Convert.h

@@ -61,7 +61,7 @@ void setDataInfo(const std::vector<T *> &tensors, tensors_data_info *info)

   for (uint32_t idx = 0; idx < info->num_info; ++idx)
   {
-    info->info[idx].layout = convertDataLayout(tensors[idx]->layout());
+    info->info[idx].layout = DATA_LAYOUT_NHWC;
     info->info[idx].type = convertDataType(tensors[idx]->data_type());
   }
 }
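Note: with layout() gone from ITensor, the trix backend can no longer query each tensor, so it pins the device-side layout to DATA_LAYOUT_NHWC, matching the runtime-wide NHWC assumption; only the data type still needs per-tensor conversion (DATA_LAYOUT_NHWC and tensors_data_info come from the TRIX engine headers this backend builds against).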
6 changes: 0 additions & 6 deletions runtime/onert/backend/xnnpack/KernelGenerator.cc

@@ -70,12 +70,6 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI

   for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
-    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
-    if (portable_tensor)
-    {
-      assert(portable_tensor->layout() == ir::Layout::NHWC);
-    }
-
     auto tensor = _tensor_reg->getNativeTensor(ind);
     if (tensor)
     {
3 changes: 0 additions & 3 deletions runtime/onert/backend/xnnpack/ops/ConvolutionLayer.cc

@@ -59,9 +59,6 @@ void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTe
   _activation = activation;
   _output = output;

-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
3 changes: 0 additions & 3 deletions runtime/onert/backend/xnnpack/ops/DepthwiseConvolutionLayer.cc

@@ -60,9 +60,6 @@ void DepthwiseConvolutionLayer::configure(
   _activation = activation;
   _output = output;

-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
3 changes: 0 additions & 3 deletions runtime/onert/backend/xnnpack/ops/FullyConnectedLayer.cc

@@ -44,9 +44,6 @@ void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortabl
   _activation = activation;
   _output = output;

-  // TODO Support not nhwc layer
-  assert(_input->layout() == ir::Layout::NHWC);
-
   assert(_activation == ir::Activation::NONE || _activation == ir::Activation::RELU ||
          _activation == ir::Activation::RELU1 || _activation == ir::Activation::RELU6);
 }
1 change: 0 additions & 1 deletion runtime/onert/core/include/backend/ITensor.h

@@ -42,7 +42,6 @@ class ITensor
   virtual uint8_t *buffer() const = 0;
   virtual size_t total_size() const = 0;
   virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
-  virtual ir::Layout layout() const = 0;
   virtual ir::DataType data_type() const = 0;
   virtual float data_scale() const = 0;
   virtual int32_t data_zero_point() const = 0;
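For reference, a minimal sketch of the trimmed interface after this change, abridged to the members shown in this hunk (the ir::DataType and ir::Coordinates definitions below are stand-ins for the real onert headers):

#include <cstddef>
#include <cstdint>

namespace ir
{
enum class DataType { FLOAT32, UINT8 };
struct Coordinates {};
} // namespace ir

class ITensor
{
public:
  virtual ~ITensor() = default;
  virtual uint8_t *buffer() const = 0;
  virtual size_t total_size() const = 0;
  virtual size_t calcOffset(const ir::Coordinates &coords) const = 0;
  // layout() used to be declared here as a pure virtual; with it removed,
  // backend tensors no longer advertise a layout through the base interface.
  virtual ir::DataType data_type() const = 0;
  virtual float data_scale() const = 0;
  virtual int32_t data_zero_point() const = 0;
};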
6 changes: 2 additions & 4 deletions runtime/onert/core/include/backend/basic/Tensor.h

@@ -41,8 +41,8 @@ class Tensor : public IPortableTensor

 public:
   Tensor(const ir::OperandInfo &info, DynamicMemoryManager *dynamic_mem_mgr)
-    : IPortableTensor(info), _layout(ir::Layout::NHWC), _buffer(nullptr), _size(info.total_size()),
-      _num_references(0), _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
+    : IPortableTensor(info), _buffer(nullptr), _size(info.total_size()), _num_references(0),
+      _dynamic_mem_mgr(dynamic_mem_mgr), _allocator(nullptr)
   {
     // DO NOTHING
   }
@@ -71,7 +71,6 @@

 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   bool applyShape(const ir::Shape &new_shape) override;

@@ -126,7 +125,6 @@
   void setShape(const ir::Shape &new_shape) override;

 protected:
-  const ir::Layout _layout;
   uint8_t *_buffer;
   size_t _size;
   int32_t _num_references;
1 change: 0 additions & 1 deletion runtime/onert/core/include/backend/basic/train/TrainableTensor.h

@@ -51,7 +51,6 @@ class TrainableTensor : public backend::train::ITrainableTensor

 public:
   uint8_t *buffer() const override { return _tensor.buffer(); }
-  ir::Layout layout() const override { return _tensor.layout(); }

 public:
   std::vector<ITensor *> optVars() override;
1 change: 0 additions & 1 deletion runtime/onert/core/src/backend/builtin/IOTensor.cc

@@ -40,7 +40,6 @@ void IOTensor::setTensor(IPortableTensor *tensor)
 {
   assert(tensor);
   assert(tensor != this);
-  assert(tensor->layout() == _orig->layout()); // Changing layout is not considered yet
   _tensor = tensor;
   if (_info.shape() != tensor->getShape())
   {
2 changes: 1 addition & 1 deletion runtime/onert/core/src/backend/builtin/IOTensor.h

@@ -57,7 +57,7 @@ class IOTensor : public IPortableTensor

 public:
   uint8_t *buffer() const override { return _tensor->buffer(); }
-  ir::Layout layout() const override { return _orig->layout(); }
+  ir::Layout layout() const { return _orig->layout(); }
   void set_dynamic() override
   {
     _info.setDynamic();
2 changes: 1 addition & 1 deletion runtime/onert/core/src/backend/builtin/UserTensor.h

@@ -45,7 +45,7 @@ class UserTensor : public IPortableTensor

 public:
   uint8_t *buffer() const override { return _buffer; }
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
   bool applyShape(const ir::Shape &) override;
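Note how IOTensor and UserTensor (and EdgeTensor, below) keep layout() as a plain non-virtual accessor: only the model I/O boundary still cares about the user-specified layout, so the method survives on the concrete classes while disappearing from the ITensor interface. A minimal sketch of the pattern (names are stand-ins; the real classes carry much more state):

namespace ir
{
enum class Layout { NHWC, NCHW };
}

class UserTensorSketch
{
public:
  explicit UserTensorSketch(ir::Layout layout) : _layout(layout) {}
  // Non-virtual: callers must hold the concrete type to ask for a layout,
  // so generic ITensor code can no longer depend on it.
  ir::Layout layout() const { return _layout; }

private:
  ir::Layout _layout;
};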
4 changes: 2 additions & 2 deletions runtime/onert/core/src/backend/builtin/kernel/PermuteLayer.cc

@@ -265,7 +265,7 @@ void PermuteLayer::run()
     // If dst is subtensor, we have to use clEnqueueMapBuffer instead of clEnqueueWirteBuffer
     else if (dst->needMemoryMap() && !dst->is_subtensor())
     {
-      if (!src->has_padding() && !dst->has_padding() && src->layout() == dst->layout())
+      if (!src->has_padding() && !dst->has_padding() && permute_type == ir::PermuteType::COPY)
       {
         // This is more effective than multi-threading
         src->access([&](backend::ITensor &) { dst->enqueueWriteBuffer(src->buffer(), false); });
@@ -281,7 +281,7 @@
       }
     }
     else if (src->needMemoryMap() && !src->is_subtensor() && !src->has_padding() &&
-             !dst->has_padding() && src->layout() == dst->layout())
+             !dst->has_padding() && permute_type == ir::PermuteType::COPY)
     {
       // This is more effective than multi-threading
       assert(!dst->needMemoryMap());
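The fast-path condition changes meaning slightly: rather than comparing two tensors' self-reported layouts, the kernel consults the permute kind it was constructed with. A self-contained sketch of the new guard (ir::PermuteType is assumed to distinguish a plain copy from the NHWC/NCHW conversions, consistent with its use above):

namespace ir
{
enum class PermuteType { COPY, NHWC_TO_NCHW, NCHW_TO_NHWC };
}

// A flat buffer copy is only valid when neither tensor is padded and no
// layout conversion is requested.
inline bool can_copy_flat(bool src_has_padding, bool dst_has_padding,
                          ir::PermuteType permute_type)
{
  return !src_has_padding && !dst_has_padding && permute_type == ir::PermuteType::COPY;
}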
2 changes: 1 addition & 1 deletion runtime/onert/core/src/exec/EdgeTensor.h

@@ -36,7 +36,7 @@ class EdgeTensor : public backend::IPortableTensor
   ~EdgeTensor() = default;

   uint8_t *buffer() const override { return _buffer.get(); }
-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   void set_dynamic() override { _info.setDynamic(); }
   bool applyShape(const ir::Shape &new_shape) override;
   void setShape(const ir::Shape &new_shape) override { _info.shape(new_shape); }
3 changes: 0 additions & 3 deletions runtime/onert/core/src/exec/IPermuteFunction.cc

@@ -36,9 +36,6 @@ using namespace onert;

 inline nnfw::cker::Shape getShape(const backend::ITensor *tensor)
 {
   const ir::Shape shape = tensor->getShape();
-
-  assert(tensor->layout() == ir::Layout::NHWC);
-
   auto rank = shape.rank();
   nnfw::cker::Shape ret(rank);
   auto data = ret.DimsData();
4 changes: 2 additions & 2 deletions runtime/onert/core/src/exec/IPermuteFunction.h

@@ -93,7 +93,7 @@ class IPermuteFunction : public IFunction
       // Now there is no case where both src and dst have cl buffer.
       assert(!src->needMemoryMap());

-      if (!src->has_padding() && !dst->has_padding() && src->layout() == dst->layout())
+      if (!src->has_padding() && !dst->has_padding() && permute_type == ir::PermuteType::COPY)
       {
         src->access([&](backend::ITensor &) { dst->enqueueWriteBuffer(src->buffer(), false); });
       }
@@ -110,7 +110,7 @@
       }
     }
     else if (src->needMemoryMap() && !src->is_subtensor() && !src->has_padding() &&
-             !dst->has_padding() && src->layout() == dst->layout())
+             !dst->has_padding() && permute_type == ir::PermuteType::COPY)
     {
       assert(!dst->needMemoryMap());
       dst->access([&](backend::ITensor &) { src->enqueueReadBuffer(dst->buffer(), true); });
2 changes: 1 addition & 1 deletion runtime/onert/core/src/exec/IPermuteFunction.test.cc

@@ -71,7 +71,7 @@ class MockUpTensor : public ITensor

   uint8_t *buffer() const override { return _data; }

-  ir::Layout layout() const override { return _layout; }
+  ir::Layout layout() const { return _layout; }
   ir::DataType data_type() const override { return _type_info.type(); }
   float data_scale() const override { return _type_info.scale(); }
   int32_t data_zero_point() const override { return _type_info.zero_point(); }
2 changes: 1 addition & 1 deletion runtime/onert/core/src/exec/feature/MockTensor.test.h

@@ -47,7 +47,7 @@ template <typename T> class MockTensor : public onert::backend::ITensor

 public: // DUMMY methods
   size_t total_size() const override { return 0; }
-  onert::ir::Layout layout() const override { return _layout; }
+  onert::ir::Layout layout() const { return _layout; }
   onert::ir::DataType data_type() const override { return onert::ir::DataType::UINT8; }
   float data_scale() const override { return 0; }
   int32_t data_zero_point() const override { return 0; }
2 changes: 0 additions & 2 deletions runtime/onert/core/src/exec/feature/nchw/Reader.h

@@ -52,8 +52,6 @@ template <typename T> class Reader : public feature::Reader<T>
   Reader(backend::ITensor *tensor)
     : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
   {
-    assert(tensor->layout() == ir::Layout::NCHW);
-
     const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
     auto shape = tensor->getShape();
     _strides.W = shape.dim(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
2 changes: 0 additions & 2 deletions runtime/onert/core/src/exec/feature/nhwc/Reader.h

@@ -53,8 +53,6 @@ template <typename T> class Reader : public feature::Reader<T>
   Reader(const backend::ITensor *tensor)
     : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()}
   {
-    assert(tensor->layout() == ir::Layout::NHWC);
-
     const auto start_offset = tensor->calcOffset({0, 0, 0, 0});
     auto shape = tensor->getShape();
     _strides.C = shape.dim(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset;
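With the asserts gone, both feature readers derive strides purely from calcOffset(), which already encodes the physical element ordering; the NCHW/NHWC distinction lives in which logical axis each stride is assigned to, not in a queried layout. A sketch of the idea (Tensor4D is a hypothetical dense NHWC tensor standing in for backend::ITensor):

#include <cstddef>
#include <cstdint>

struct Tensor4D // hypothetical: dense, unpadded, NHWC-ordered
{
  int32_t n, h, w, c;
  size_t calcOffset(int in, int ih, int iw, int ic) const
  {
    return ((static_cast<size_t>(in) * h + ih) * w + iw) * c + ic;
  }
};

// The stride of an axis is the offset delta when that axis advances by one
// (the readers above additionally force stride 0 for axes of size 1).
inline size_t axisStride(const Tensor4D &t, int dn, int dh, int dw, int dc)
{
  const size_t base = t.calcOffset(0, 0, 0, 0);
  return t.calcOffset(dn, dh, dw, dc) - base;
}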