[compute/cker] Remove UNUSED_RELEASE macro (#14337)
This commit removes the UNUSED_RELEASE() macro from cker and replaces its remaining uses with the C++17 [[maybe_unused]] attribute.

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh <[email protected]>
hseok-oh authored Nov 25, 2024
1 parent 848333c commit 46b27b2
Showing 21 changed files with 87 additions and 148 deletions.
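
For context, UNUSED_RELEASE was a small helper macro defined in Shape.h as "#define UNUSED_RELEASE(a) (void)(a)". Its only purpose was to silence unused-parameter and unused-variable warnings in release builds, where NDEBUG removes the assert() calls that otherwise consume those values. The C++17 [[maybe_unused]] attribute expresses the same intent directly on the declaration, which is what this commit switches to. The sketch below is a simplified, hypothetical before/after (plain int parameters instead of cker::Shape; the names MatchingDimOld and MatchingDimNew are illustrative only), not code from the repository:

#include <cassert>

// Before: cast the otherwise-unused value to void so that release builds
// (NDEBUG defined, asserts compiled out) do not emit unused warnings.
#define UNUSED_RELEASE(a) (void)(a)

inline int MatchingDimOld(int dim1, int dim2)
{
  UNUSED_RELEASE(dim2); // needed only because the assert below disappears under NDEBUG
  assert(dim1 == dim2);
  return dim1;
}

// After: C++17 [[maybe_unused]] attaches the same intent to the parameter itself,
// so no project-local macro is required.
inline int MatchingDimNew(int dim1, [[maybe_unused]] int dim2)
{
  assert(dim1 == dim2);
  return dim1;
}

int main() { return MatchingDimOld(3, 3) == MatchingDimNew(3, 3) ? 0 : 1; }
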
26 changes: 10 additions & 16 deletions compute/cker/include/cker/Shape.h
@@ -23,8 +23,6 @@
#include <cassert>
#include <vector>

#define UNUSED_RELEASE(a) (void)(a)

namespace nnfw
{
namespace cker
@@ -219,20 +217,18 @@ class Shape
};
};

inline int MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2)
inline int MatchingDim(const Shape &shape1, int index1, [[maybe_unused]] const Shape &shape2,
[[maybe_unused]] int index2)
{
UNUSED_RELEASE(shape2);
UNUSED_RELEASE(index2);
assert(shape1.Dims(index1) == shape2.Dims(index2));
return shape1.Dims(index1);
}

template <typename... Args>
int MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2, Args... args)
int MatchingDim(const Shape &shape1, int index1, [[maybe_unused]] const Shape &shape2,
[[maybe_unused]] int index2, Args... args)
{
assert(shape1.Dims(index1) == shape2.Dims(index2));
UNUSED_RELEASE(shape2);
UNUSED_RELEASE(index2);
return MatchingDim(shape1, index1, args...);
}

@@ -305,9 +301,9 @@ template <typename... Ts> inline int MatchingFlatSize(const Shape &shape, Ts...
return shape.FlatSize();
}

inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape &check_shape_0)
inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim,
[[maybe_unused]] const Shape &check_shape_0)
{
UNUSED_RELEASE(check_shape_0);
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i)
{
@@ -319,10 +315,10 @@ inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape
return FlatSizeSkipDim(shape, skip_dim);
}

inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape &check_shape_0,
inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim,
[[maybe_unused]] const Shape &check_shape_0,
const Shape &check_shape_1)
{
UNUSED_RELEASE(check_shape_0);
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i)
{
@@ -338,12 +334,10 @@ inline int MatchingElementsSize(const Shape &shape, const Shape &check_shape_0,
const Shape &check_shape_1)
{
const int size_1 = shape.FlatSize();
const int size_2 = check_shape_0.FlatSize();
const int size_3 = check_shape_1.FlatSize();
[[maybe_unused]] const int size_2 = check_shape_0.FlatSize();
[[maybe_unused]] const int size_3 = check_shape_1.FlatSize();
assert(size_1 == size_2);
assert(size_2 == size_3);
UNUSED_RELEASE(size_2);
UNUSED_RELEASE(size_3);
return size_1;
}

4 changes: 2 additions & 2 deletions compute/cker/include/cker/Types.h
@@ -541,7 +541,8 @@ struct GemmParams

// Validates self-consistency of GemmParams.
template <typename AccumScalar, typename DstScalar, QuantizationFlavor quantization_flavor>
void ValidateGemmParams(const GemmParams<AccumScalar, DstScalar, quantization_flavor> &params)
void ValidateGemmParams(
[[maybe_unused]] const GemmParams<AccumScalar, DstScalar, quantization_flavor> &params)
{
// Guard consistency of the quantized multiplier fields.
if (quantization_flavor == QuantizationFlavor::kFloatingPoint)
@@ -576,7 +577,6 @@ void ValidateGemmParams(const GemmParams<AccumScalar, DstScalar, quantization_fl
assert(!params.multiplier_fixedpoint_perchannel);
assert(!params.multiplier_exponent_perchannel);
}
UNUSED_RELEASE(params);
}

} // namespace cker
6 changes: 3 additions & 3 deletions compute/cker/include/cker/operation/ArgMinMax.h
@@ -26,10 +26,10 @@ namespace cker
{

template <typename T1, typename T2, typename Cmp>
void ArgMinMax(const Shape &input1_shape, const T1 *input1_data, const Shape &output_shape,
T2 *output_data, int32_t axis, const Cmp &cmp)
void ArgMinMax(const Shape &input1_shape, const T1 *input1_data,
[[maybe_unused]] const Shape &output_shape, T2 *output_data, int32_t axis,
const Cmp &cmp)
{
UNUSED_RELEASE(output_shape);
assert(input1_shape.DimensionsCount() > 0);
assert(input1_shape.DimensionsCount() - 1 == output_shape.DimensionsCount());
if (axis < 0)
9 changes: 3 additions & 6 deletions compute/cker/include/cker/operation/Concatenation.h
@@ -39,22 +39,20 @@ inline void Concatenation(const ConcatenationParams &params, const Shape *const
const int concat_dimensions = output_shape.DimensionsCount();
assert(axis < concat_dimensions);

int64_t concat_size = 0;
[[maybe_unused]] int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++)
{
assert(input_shapes[i]->DimensionsCount() == concat_dimensions);
for (int j = 0; j < concat_dimensions; j++)
{
if (j != axis)
{
auto dim_checked = MatchingDim(*input_shapes[i], j, output_shape, j);
UNUSED_RELEASE(dim_checked);
[[maybe_unused]] auto dim_checked = MatchingDim(*input_shapes[i], j, output_shape, j);
}
}
concat_size += input_shapes[i]->Dims(axis);
}
assert(concat_size == output_shape.Dims(axis));
UNUSED_RELEASE(concat_size);
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i)
{
@@ -97,7 +95,7 @@ inline void ConcatenationWithScaling(const ConcatenationParams &params,
const int concat_dimensions = output_shape.DimensionsCount();
assert(axis <= concat_dimensions);

int64_t concat_size = 0;
[[maybe_unused]] int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++)
{
assert(input_shapes[i]->DimensionsCount() == concat_dimensions);
@@ -111,7 +109,6 @@
concat_size += input_shapes[i]->Dims(axis);
}
assert(concat_size == output_shape.Dims(axis));
UNUSED_RELEASE(concat_size);
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i)
{
34 changes: 13 additions & 21 deletions compute/cker/include/cker/operation/FullyConnected.h
@@ -128,14 +128,12 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu

#endif // CKER_X86_PLATFORM

inline void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape,
const uint8_t *input_data, const Shape &filter_shape,
const uint8_t *filter_data, const Shape &bias_shape,
const int32_t *bias_data, const Shape &output_shape,
uint8_t *output_data)
inline void FullyConnected(const FullyConnectedParams &params,
[[maybe_unused]] const Shape &input_shape, const uint8_t *input_data,
const Shape &filter_shape, const uint8_t *filter_data,
[[maybe_unused]] const Shape &bias_shape, const int32_t *bias_data,
const Shape &output_shape, uint8_t *output_data)
{
UNUSED_RELEASE(input_shape);
UNUSED_RELEASE(bias_shape);
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
@@ -185,8 +183,9 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape &input_shape,
const float *input_data, const Shape &filter_shape,
const int8_t *filter_data, const Shape &, const float *bias_data,
const Shape &output_shape, float *output_data,
FCTempArena &temp_arena, ruy::Context *ruy_context)
[[maybe_unused]] const Shape &output_shape, float *output_data,
FCTempArena &temp_arena,
[[maybe_unused]] ruy::Context *ruy_context)
{
int total_input_size = input_shape.FlatSize();
const int input_size = filter_shape.Dims(1);
@@ -237,8 +236,6 @@ inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape
MatrixBatchVectorMultiplyAccumulate(filter_data, num_units, input_size, quant_data,
scaling_factors_ptr, batch_size, output_data,
/*result_stride=*/1);
UNUSED_RELEASE(ruy_context);
UNUSED_RELEASE(output_shape);
#endif

// Apply activation function to floats.
@@ -250,16 +247,12 @@ inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape
return;
}

inline void FullyConnectedSparseWeightRandom(const FullyConnectedParams &params,
const Shape &input_shape, const float *input_data,
const Shape &weights_shape, const float *weights_data,
const Shape &bias_shape, const float *bias_data,
const Shape &output_shape, float *output_data,
const uint16_t *w1_segments,
const uint16_t *w1_indices)
inline void FullyConnectedSparseWeightRandom(
const FullyConnectedParams &params, [[maybe_unused]] const Shape &input_shape,
const float *input_data, const Shape &weights_shape, const float *weights_data,
[[maybe_unused]] const Shape &bias_shape, const float *bias_data, const Shape &output_shape,
float *output_data, const uint16_t *w1_segments, const uint16_t *w1_indices)
{
UNUSED_RELEASE(params);
UNUSED_RELEASE(input_shape);

assert(weights_shape.DimensionsCount() == 2);
assert(output_shape.DimensionsCount() == 2);
@@ -271,7 +264,6 @@ inline void FullyConnectedSparseWeightRandom(const FullyConnectedParams &params,
MatchingDim(weights_shape, weights_dims_count - 2, output_shape, output_dims_count - 1);
const int accum_depth = weights_shape.Dims(weights_dims_count - 1);

UNUSED_RELEASE(bias_shape);
if (bias_data)
{
VectorBatchVectorAssign(bias_data, output_depth, batches, output_data);
14 changes: 5 additions & 9 deletions compute/cker/include/cker/operation/FullyConnectedSparse16x1.h
@@ -54,15 +54,12 @@ namespace nnfw
{
namespace cker
{
inline void FullyConnectedSparseWeight16x1(const FullyConnectedParams &params,
const Shape &input_shape, const float *input_data,
const Shape &weights_shape, const float *weights_data,
const Shape &bias_shape, const float *bias_data,
const Shape &output_shape, float *output_data,
const uint16_t *w1_segments, const uint16_t *w1_indices)
inline void FullyConnectedSparseWeight16x1(
const FullyConnectedParams &params, [[maybe_unused]] const Shape &input_shape,
const float *input_data, const Shape &weights_shape, const float *weights_data,
[[maybe_unused]] const Shape &bias_shape, const float *bias_data, const Shape &output_shape,
float *output_data, const uint16_t *w1_segments, const uint16_t *w1_indices)
{
UNUSED_RELEASE(input_shape);

assert(weights_shape.DimensionsCount() == 2);
assert(output_shape.DimensionsCount() == 2);

@@ -73,7 +70,6 @@ inline void FullyConnectedSparseWeight16x1(const FullyConnectedParams &params,
MatchingDim(weights_shape, weights_dims_count - 2, output_shape, output_dims_count - 1);
const int accum_depth = weights_shape.Dims(weights_dims_count - 1);

UNUSED_RELEASE(bias_shape);
if (bias_data)
{
VectorBatchVectorAssign(bias_data, output_depth, batches, output_data);
4 changes: 1 addition & 3 deletions compute/cker/include/cker/operation/FusedBatchNorm.h
@@ -104,7 +104,7 @@ class FusedBatchNorm
const int rest_size_minus_one = (rest_size > 1) ? (rest_size - 1) : 1;
float rest_size_inv = static_cast<float>(1.0f / static_cast<float>(rest_size));
// This adjustment is for Bessel's correction
float rest_size_adjust =
[[maybe_unused]] float rest_size_adjust =
static_cast<float>(rest_size) / static_cast<float>(rest_size_minus_one);

Eigen::Tensor<float, 1, Eigen::RowMajor> batch_mean(depth);
@@ -124,8 +124,6 @@ class FusedBatchNorm
auto x_shifted =
(x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec)).template cast<float>();

UNUSED_RELEASE(rest_size_adjust);

y.reshape(rest_by_depth).device(d) = x_shifted;

memcpy(output_data, y.data(), output_shape.FlatSize() * sizeof(float));
8 changes: 3 additions & 5 deletions compute/cker/include/cker/operation/InstanceNorm.h
@@ -29,9 +29,9 @@ namespace cker
{

inline void InstanceNorm(const InstanceNormParams &params, const Shape &input_shape,
const float *input_data, const Shape &gamma_shape, const float *gamma_data,
const Shape &beta_shape, const float *beta_data, const Shape &output_shape,
float *output_data)
const float *input_data, [[maybe_unused]] const Shape &gamma_shape,
const float *gamma_data, [[maybe_unused]] const Shape &beta_shape,
const float *beta_data, const Shape &output_shape, float *output_data)
{
const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
const int32_t heights = MatchingDim(input_shape, 1, output_shape, 1);
@@ -40,8 +40,6 @@ inline void InstanceNorm(const InstanceNormParams &params, const Shape &input_sh
const float output_activation_min = params.float_activation_min;
const float output_activation_max = params.float_activation_max;

UNUSED_RELEASE(gamma_shape);
UNUSED_RELEASE(beta_shape);
assert(output_activation_min <= output_activation_max);

for (int32_t batch = 0; batch < batches; batch++)
3 changes: 0 additions & 3 deletions compute/cker/include/cker/operation/ReduceMean.h
@@ -211,7 +211,6 @@ template <typename In, typename Out>
void Mean(const Shape &input_shape, const In *input_data, const Shape &output_shape,
Out *output_data, const std::vector<int> &axes)
{
UNUSED_RELEASE(output_shape);
assert(input_shape.DimensionsCount() > 0);
ReduceMean m_obj;
m_obj.ReduceOp<In, Out>(input_shape, input_data, output_shape, output_data, axes, true, (Out)0,
@@ -223,7 +222,6 @@ void MeanQ8Asymm(const Shape &input_shape, const In *input_data, float input_sca
int32_t input_offset, const Shape &output_shape, Out *output_data,
float output_scale, int32_t output_offset, const std::vector<int> &axes)
{
UNUSED_RELEASE(output_shape);
assert(input_shape.DimensionsCount() > 0);
ReduceMean m_obj;
m_obj.ReduceOp<In, Out>(input_shape, input_data, input_scale, input_offset, output_shape,
@@ -235,7 +233,6 @@ template <typename In, typename Out>
void MeanAxis1And2(const Shape &input_shape, const In *input_data, const Shape &output_shape,
Out *output_data)
{
UNUSED_RELEASE(output_shape);
assert(input_shape.DimensionsCount() == 4);
assert(output_shape.DimensionsCount() == 4);

14 changes: 6 additions & 8 deletions compute/cker/include/cker/operation/SpaceToBatchND.h
@@ -27,15 +27,13 @@ namespace cker
{

template <typename T>
inline void SpaceToBatchND(const SpaceToBatchParams &params, const Shape &unextended_input_shape,
const T *input_data, const Shape &unextended_block_shape_shape,
const int32_t *block_shape_data, const Shape &unextended_padding_shape,
const int32_t *paddings_data, const Shape &unextended_output_shape,
T *output_data)
inline void
SpaceToBatchND(const SpaceToBatchParams &params, const Shape &unextended_input_shape,
const T *input_data, [[maybe_unused]] const Shape &unextended_block_shape_shape,
const int32_t *block_shape_data,
[[maybe_unused]] const Shape &unextended_padding_shape, const int32_t *paddings_data,
const Shape &unextended_output_shape, T *output_data)
{
UNUSED_RELEASE(unextended_block_shape_shape);
UNUSED_RELEASE(unextended_padding_shape);

assert(unextended_input_shape.DimensionsCount() <= 4);
assert(unextended_output_shape.DimensionsCount() <= 4);
const Shape input_shape = Shape::ExtendedShape(4, unextended_input_shape);
1 change: 0 additions & 1 deletion compute/cker/include/cker/operation/SqDiff.h
@@ -63,7 +63,6 @@ template <typename T>
void SqDiff(const Shape &input1_shape, const T *input1_data, const Shape &input2_shape,
const T *input2_data, const Shape &output_shape, T *output_data)
{
UNUSED_RELEASE(output_shape);
assert(input1_shape.DimensionsCount() > 0 && input2_shape.DimensionsCount() > 0 &&
output_shape.DimensionsCount() > 0);
int outRank = output_shape.DimensionsCount();
7 changes: 2 additions & 5 deletions compute/cker/include/cker/operation/StridedSlice.h
@@ -222,11 +222,9 @@ buildStridedSliceParams(const T *begin, const T *end, const T *strides, const ui
}

void checkOutputSize(const StridedSliceParams &op_params, const Shape &input_shape,
const Shape &output_shape, uint32_t rank)
[[maybe_unused]] const Shape &output_shape, uint32_t rank)
{
UNUSED_RELEASE(output_shape);

int32_t shape_size = 0;
[[maybe_unused]] int32_t shape_size = 0;

for (uint32_t idx = 0; idx < rank; ++idx)
{
@@ -254,7 +252,6 @@ void checkOutputSize(const StridedSliceParams &op_params, const Shape &input_sha
}

assert(output_shape.DimensionsCount() == shape_size);
UNUSED_RELEASE(shape_size);
}

template <typename T>
5 changes: 2 additions & 3 deletions compute/cker/include/cker/operation/Transpose.h
@@ -294,12 +294,11 @@ size_t Flatten(const Shape &input_shape, const Shape &output_shape, const Transp
// Perform transpose by transposing 4x4 blocks of the input, proceeding from
// left to right (down the rows) of the input, and then from top to bottom.
template <typename T>
inline void Transpose2D(const Shape &input_shape, const T *input_data, const Shape &output_shape,
T *output_data)
inline void Transpose2D(const Shape &input_shape, const T *input_data,
[[maybe_unused]] const Shape &output_shape, T *output_data)
{
assert(input_shape.DimensionsCount() == 2);
assert(output_shape.DimensionsCount() == 2);
UNUSED_RELEASE(output_shape);

const int d0 = input_shape.DimsData()[0];
const int d1 = input_shape.DimsData()[1];
3 changes: 1 addition & 2 deletions compute/cker/include/cker/operation/Unpack.h
@@ -28,7 +28,7 @@ namespace cker

template <typename Scalar>
void Unpack(const UnpackParams &params, const Shape &input_shape, const Scalar *input_data,
const Shape &output_shape, Scalar *const *output_datas)
[[maybe_unused]] const Shape &output_shape, Scalar *const *output_datas)
{
const int dimensions = input_shape.DimensionsCount();
const int outputs_count = params.num_split;
@@ -44,7 +44,6 @@ void Unpack(const UnpackParams &params, const Shape &input_shape, const Scalar *
copy_size *= input_shape.Dims(i);
}
assert(output_shape.FlatSize() == copy_size * outer_size);
UNUSED_RELEASE(output_shape);

for (int i = 0; i < outputs_count; ++i)
{