[onert] Optimize Bias Grad Computation #12673

Merged 1 commit on Feb 27, 2024
220 changes: 220 additions & 0 deletions compute/cker/include/cker/eigen/redux_functor.h
@@ -0,0 +1,220 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __NNFW_CKER_EIGEN_REDUX_FUNCTOR_H__
#define __NNFW_CKER_EIGEN_REDUX_FUNCTOR_H__

#include <cker/operation/Helper/Tensor.h>

// From tensorflow/core/kernels/redux_functor.h
namespace nnfw
{
namespace cker
{
namespace functor
{

// Compute reduction over outer dimensions.
// Example:
// input: [D1, D2, ... , DN]
// ->
// output: [Di, ... , DN] where i belongs to set [1,N]
template <typename Device, typename InputT, typename AccumT, typename OutputT,
typename BinaryFunctor>
struct ReduceOuterDimensions
{
ReduceOuterDimensions() {}

template <int num_dims>
void operator()(const Device &device, const Eigen::DSizes<Eigen::Index, num_dims> &input_dims,
const Tensor &input, Tensor *output) const
{
// Compute inner and outer dim after reshaping into 2d tensor.
const int num_output_dims = output->shape.DimensionsCount();
auto output_dims = output->template flat<OutputT>().dimensions();

Eigen::Index inner_dim = 1, outer_dim = 1;
for (int i = 0; i < num_dims - num_output_dims; ++i)
outer_dim *= input_dims[i];
for (int i = num_dims - num_output_dims; i < num_dims; ++i)
inner_dim *= input_dims[i];

if (1 == outer_dim)
{
// Nothing to do but passing input to output.
output->template flat<OutputT>() =
input.template flat<InputT>().template cast<OutputT>().reshape(output_dims);
return;
}

// Get device thread num.
const Eigen::Index num_threads = device.numThreads();

// If the inner dim parallelism is large enough
// TODO(ezhulenev): There seems to be no benefits in going this route. Check
// if this can be improved, or use better heuristic?
if (inner_dim > num_threads * 32)
{
// Do not create more blocks than there are threads in a pool.
const Eigen::Index num_blocks = num_threads;

// Block size along the outer dimension.
const Eigen::Index inner_block_size = Eigen::divup(inner_dim, num_blocks);
const InputT *input_data = input.template flat<InputT>().data();

// Allocate temporary buffer for partial reductions.
Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index> buffer({inner_dim});
buffer.setZero();
AccumT *buffer_data = buffer.data();

using Buffer =
Eigen::TensorMap<Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index>, Eigen::Unaligned>;

using Input = Eigen::TensorMap<Eigen::Tensor<const InputT, 1, Eigen::RowMajor, Eigen::Index>,
Eigen::Unaligned>;

const auto compute = [inner_dim, outer_dim, inner_block_size, input_data,
buffer_data](Eigen::Index start, Eigen::Index limit) -> void {
Eigen::Index inner_dim_start = start * inner_block_size;
Eigen::Index inner_dim_limit = limit * inner_block_size;
inner_dim_limit = std::min(inner_dim, inner_dim_limit);
Eigen::Index my_job_len = inner_dim_limit - inner_dim_start;

const InputT *my_job_start = input_data + inner_dim_start;
Buffer buf(buffer_data + inner_dim_start, my_job_len);

for (Eigen::Index i = 0; i < outer_dim; ++i)
{
auto in = Input(my_job_start + i * inner_dim, my_job_len);
auto cast = in.template cast<AccumT>();
buf =
Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf), const decltype(cast)>(
buf, cast);
}
};

// Compute cost of reducing a single block.
const Eigen::Index compute_size = outer_dim * inner_block_size;
const Eigen::Index compute_input_bytes = compute_size * sizeof(InputT);
const Eigen::TensorOpCost cost(compute_input_bytes,
0, // We'll be mostly writing to L1, assume store cost is 0
compute_size *
Eigen::internal::functor_traits<BinaryFunctor>::Cost);

device.parallelFor(num_blocks, cost, compute);

// Write final result to the output.
output->template flat<OutputT>() = buffer.template cast<OutputT>().reshape(output_dims);
}
else
{
// Compute block size along the outer dimension for efficiency.
const Eigen::Index parallel_cell_size = inner_dim;
const Eigen::Index total_workload = outer_dim * inner_dim;
const Eigen::Index max_parallelism = total_workload / parallel_cell_size;

const Eigen::Index min_block_workload = 2000;
const Eigen::Index min_block_size = Eigen::divup(min_block_workload, parallel_cell_size);
const Eigen::Index max_num_blocks =
std::min(max_parallelism, Eigen::divup(total_workload, min_block_size));

// Do not create more blocks than there are threads in a pool.
const Eigen::Index num_blocks = std::min(max_num_blocks, num_threads);

// Block size along the outer dimension.
const Eigen::Index outer_block_size = Eigen::divup(outer_dim, num_blocks);

const InputT *input_data = input.template flat<InputT>().data();

// Allocate temporary buffer for partial reductions.
std::vector<AccumT> buffer(num_blocks * inner_dim);
AccumT *buffer_data = buffer.data();

using Buffer =
Eigen::TensorMap<Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index>, Eigen::Unaligned>;

using Input = Eigen::TensorMap<Eigen::Tensor<const InputT, 1, Eigen::RowMajor, Eigen::Index>,
Eigen::Unaligned>;

const auto compute = [inner_dim, outer_block_size, buffer_data, input_data,
outer_dim](Eigen::Index start, Eigen::Index limit) -> void {
Eigen::Index outer_dim_start = start * outer_block_size;
Eigen::Index outer_dim_limit = limit * outer_block_size;
outer_dim_limit = std::min(outer_dim, outer_dim_limit);

Buffer buf(buffer_data + start * inner_dim, inner_dim);
for (Eigen::Index i = outer_dim_start; i < outer_dim_limit; ++i)
{
auto in = Input(input_data + i * inner_dim, inner_dim);
auto cast = in.template cast<AccumT>();
buf =
Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf), const decltype(cast)>(
buf, cast);
}
};

// Compute cost of reducing a single block.
const Eigen::Index compute_size = outer_block_size * inner_dim;
const Eigen::Index compute_input_bytes = compute_size * sizeof(InputT);
const Eigen::TensorOpCost cost(compute_input_bytes,
0, // We'll be mostly writing to L1, assume store cost is 0
compute_size *
Eigen::internal::functor_traits<BinaryFunctor>::Cost);

device.parallelFor(num_blocks, cost, compute);

// Aggregate partial results from temporary buffer into first block.
auto buf0 = Buffer(buffer_data, inner_dim);
// Just sum the buffer up, as inner dimensions is not large in this case.
for (int i = 1; i < num_blocks; ++i)
{
auto buf = Buffer(buffer_data + i * inner_dim, inner_dim);
buf0 = Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf0), const decltype(buf)>(
buf0, buf);
}
// Write final result to the output.
output->template flat<OutputT>() = buf0.template cast<OutputT>().reshape(output_dims);
}
}
};

void biasReductionHelper(float *input_backprop_buffer, const Shape &input_backprop_shape,
float *bias_grad_buffer, const Shape &bias_grad_shape)
[Review comment on lines +193 to +194, Contributor Author]
This is from @YongseopKim's suggestion.

{
assert(input_backprop_buffer);
assert(bias_grad_buffer);

const nnfw::cker::functor::ReduceOuterDimensions<Eigen::ThreadPoolDevice, float, float, float,
Eigen::internal::scalar_sum_op<float>>
redux;

const Tensor input_backprop_t{input_backprop_shape, static_cast<void *>(input_backprop_buffer)};

Tensor bias_grad_t{bias_grad_shape, bias_grad_buffer};

int outer = 1;
for (int i = 0; i < input_backprop_shape.DimensionsCount() - 1; ++i)
outer *= input_backprop_shape.Dims(i);
int inner = input_backprop_shape.Dims(input_backprop_shape.DimensionsCount() - 1);

redux(*eigen_support::GetThreadPoolDevice(), Eigen::DSizes<Eigen::Index, 2>{outer, inner},
input_backprop_t, &bias_grad_t);
}

} // namespace functor
} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_EIGEN_REDUX_FUNCTOR_H__
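
Both code paths in ReduceOuterDimensions above compute the same result: the input is viewed as a row-major [outer, inner] matrix and summed over the outer dimension. Below is a minimal, Eigen-free sketch of that reference behaviour; the function name and shapes are illustrative only and are not part of this patch.

#include <cassert>
#include <iostream>
#include <vector>

// Reference semantics of ReduceOuterDimensions with a sum functor:
// view the input as a row-major [outer, inner] matrix and sum over rows,
// i.e. output[j] = sum over i of input[i * inner + j].
std::vector<float> reduceOuterSum(const std::vector<float> &input, int outer, int inner)
{
  assert(static_cast<int>(input.size()) == outer * inner);
  std::vector<float> output(inner, 0.0f);
  for (int i = 0; i < outer; ++i)
    for (int j = 0; j < inner; ++j)
      output[j] += input[i * inner + j];
  return output;
}

int main()
{
  // A [2, 2, 3] tensor reduced over its first two dimensions: outer = 4, inner = 3.
  std::vector<float> in(12);
  for (int i = 0; i < 12; ++i)
    in[i] = static_cast<float>(i);

  for (float v : reduceOuterSum(in, /*outer=*/4, /*inner=*/3))
    std::cout << v << ' '; // prints: 18 22 26
  std::cout << '\n';
  return 0;
}

The functor differs from this sketch only in how the loop is parallelized: when the inner dimension is large it splits the columns across threads; otherwise it splits the rows into blocks, reduces each block into its own partial buffer, and sums the partial buffers at the end.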
15 changes: 1 addition & 14 deletions runtime/onert/backend/train/ops/ConvolutionLayer.cc
@@ -19,7 +19,6 @@
#include "OperationUtils.h"

#include <cker/operation/Conv.h>
#include <cker/operation/Reduce.h>
#include <cker/operation/Transpose.h>
#include <cker/train/operation/Conv.h>
#include <cker/train/operation/ReLU.h>
@@ -196,20 +195,8 @@ void ConvolutionLayer::backwardFloat32()
// Calculate gradient for bias
if (_bias)
{
// TODO Use optimized kernel
[Review comment on the removed TODO, Contributor Author]
This commit resolves this TODO item.

assert(_grad_bias);
std::vector<int32_t> axes{0, 1, 2};
nnfw::cker::Reduce reduce_kernel;
reduce_kernel.prepare(backprop_act->getShape().rank(), axes.size());
bool result = reduce_kernel.ReduceGeneric<float>(
getShape(backprop_act), getBuffer<float>(backprop_act), getShape(_grad_bias),
getBuffer<float>(_grad_bias), axes, false /* keep_dims */, 0.f,
[](const float current, const float in) -> float { return in + current; });

if (!result)
{
throw std::runtime_error{"train ConvolutionLayer: Fail to caculate bias gradient"};
}
biasGrad(backprop_act, _grad_bias);
}
}
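
For reference, the removed code reduced backprop_act over axes {0, 1, 2} with keep_dims = false. For an NHWC tensor that is the same outer-dimension sum that biasGrad performs after flattening the leading axes into outer = N*H*W and keeping inner = C. A small, self-contained check with toy shapes (illustrative only, not onert code):

#include <array>
#include <cstdio>

int main()
{
  constexpr int N = 1, H = 2, W = 2, C = 3; // toy NHWC shape
  std::array<float, N * H * W * C> backprop{};
  for (int i = 0; i < N * H * W * C; ++i)
    backprop[i] = 1.0f; // any values would do

  // Reducing over axes {0, 1, 2} leaves one accumulator per channel,
  // i.e. a sum over outer = N * H * W row-major slices of length C.
  std::array<float, C> bias_grad{};
  constexpr int outer = N * H * W;
  for (int i = 0; i < outer; ++i)
    for (int c = 0; c < C; ++c)
      bias_grad[c] += backprop[i * C + c];

  for (float v : bias_grad)
    std::printf("%g ", v); // prints: 4 4 4 (each channel summed over N*H*W = 4 positions)
  std::printf("\n");
  return 0;
}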

12 changes: 1 addition & 11 deletions runtime/onert/backend/train/ops/DepthwiseConvolutionLayer.cc
@@ -19,7 +19,6 @@
#include "OperationUtils.h"

#include <cker/train/operation/ReLU.h>
#include <cker/operation/Reduce.h>

namespace onert
{
@@ -170,17 +169,8 @@ void DepthwiseConvolutionLayer::backwardFloat32()
// Calculate gradient for bias
if (_bias)
{
// TODO Use optimized kernel
[Review comment on the removed TODO, Contributor Author]
This commit resolves this TODO item.

assert(_grad_bias);
std::vector<int32_t> axes{0, 1, 2};
nnfw::cker::Reduce reduce_kernel;
reduce_kernel.prepare(backprop_act->getShape().rank(), axes.size());
bool result = reduce_kernel.ReduceGeneric<float>(
getShape(backprop_act), getBuffer<float>(backprop_act), getShape(_grad_bias),
getBuffer<float>(_grad_bias), axes, false /* keep_dims */, 0.f,
[](const float current, const float in) -> float { return in + current; });
if (!result)
throw std::runtime_error{"train DepthwiseConvolutionLayer: Fail to calculate bias gradient"};
biasGrad(backprop_act, _grad_bias);
}
}

15 changes: 15 additions & 0 deletions runtime/onert/backend/train/ops/OperationUtils.cc
@@ -16,6 +16,7 @@

#include "OperationUtils.h"

#include <cker/eigen/redux_functor.h>
#include <cker/train/operation/ReLU.h>
#include <cker/train/operation/ReLU6.h>

@@ -64,6 +65,20 @@ const IPortableTensor *backpropActivation(const ir::Activation &activation,
return output_backprop;
}

void biasGrad(const IPortableTensor *input_backprop, IPortableTensor *bias_grad)
{
assert(bias_grad);

nnfw::cker::Shape input_backprop_shape = getShape(input_backprop);
float *input_backprop_buffer = reinterpret_cast<float *>(input_backprop->buffer());

nnfw::cker::Shape bias_grad_shape = getShape(bias_grad);
float *bias_grad_buffer = getBuffer<float>(bias_grad);

nnfw::cker::functor::biasReductionHelper(input_backprop_buffer, input_backprop_shape,
bias_grad_buffer, bias_grad_shape);
}

} // namespace ops
} // namespace train
} // namespace backend
9 changes: 9 additions & 0 deletions runtime/onert/backend/train/ops/OperationUtils.h
@@ -61,6 +61,15 @@ const IPortableTensor *backpropActivation(const ir::Activation &activation,
const IPortableTensor *input_backprop,
IPortableTensor *output_backprop);

/**
* @brief Backpropagate the bias gradient
*
* @param input_backprop the next layer's output in the backward direction,
*                       i.e. the incoming gradient to the current layer
* @param bias_grad gradient tensor of the bias
*/
void biasGrad(const IPortableTensor *input_backprop, IPortableTensor *bias_grad);

} // namespace ops
} // namespace train
} // namespace backend