diff --git a/compute/cker/include/cker/Shape.h b/compute/cker/include/cker/Shape.h
index 9269ce9aabc..8e604a945d7 100644
--- a/compute/cker/include/cker/Shape.h
+++ b/compute/cker/include/cker/Shape.h
@@ -23,8 +23,6 @@
 #include
 #include

-#define UNUSED_RELEASE(a) (void)(a)
-
 namespace nnfw
 {
 namespace cker
@@ -219,20 +217,18 @@ class Shape
   };
 };

-inline int MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2)
+inline int MatchingDim(const Shape &shape1, int index1, [[maybe_unused]] const Shape &shape2,
+                       [[maybe_unused]] int index2)
 {
-  UNUSED_RELEASE(shape2);
-  UNUSED_RELEASE(index2);
   assert(shape1.Dims(index1) == shape2.Dims(index2));
   return shape1.Dims(index1);
 }

 template <typename... Args>
-int MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2, Args... args)
+int MatchingDim(const Shape &shape1, int index1, [[maybe_unused]] const Shape &shape2,
+                [[maybe_unused]] int index2, Args... args)
 {
   assert(shape1.Dims(index1) == shape2.Dims(index2));
-  UNUSED_RELEASE(shape2);
-  UNUSED_RELEASE(index2);
   return MatchingDim(shape1, index1, args...);
 }

@@ -305,9 +301,9 @@ template <typename... Ts> inline int MatchingFlatSize(const Shape &shape, Ts...
   return shape.FlatSize();
 }

-inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape &check_shape_0)
+inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim,
+                                   [[maybe_unused]] const Shape &check_shape_0)
 {
-  UNUSED_RELEASE(check_shape_0);
   const int dims_count = shape.DimensionsCount();
   for (int i = 0; i < dims_count; ++i)
   {
@@ -319,10 +315,10 @@ inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape
   return FlatSizeSkipDim(shape, skip_dim);
 }

-inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim, const Shape &check_shape_0,
+inline int MatchingFlatSizeSkipDim(const Shape &shape, int skip_dim,
+                                   [[maybe_unused]] const Shape &check_shape_0,
                                    const Shape &check_shape_1)
 {
-  UNUSED_RELEASE(check_shape_0);
   const int dims_count = shape.DimensionsCount();
   for (int i = 0; i < dims_count; ++i)
   {
@@ -338,12 +334,10 @@ inline int MatchingElementsSize(const Shape &shape, const Shape &check_shape_0,
                                 const Shape &check_shape_1)
 {
   const int size_1 = shape.FlatSize();
-  const int size_2 = check_shape_0.FlatSize();
-  const int size_3 = check_shape_1.FlatSize();
+  [[maybe_unused]] const int size_2 = check_shape_0.FlatSize();
+  [[maybe_unused]] const int size_3 = check_shape_1.FlatSize();
   assert(size_1 == size_2);
   assert(size_2 == size_3);
-  UNUSED_RELEASE(size_2);
-  UNUSED_RELEASE(size_3);
   return size_1;
 }

diff --git a/compute/cker/include/cker/Types.h b/compute/cker/include/cker/Types.h
index 944e6e3dff8..ba661f6094d 100644
--- a/compute/cker/include/cker/Types.h
+++ b/compute/cker/include/cker/Types.h
@@ -541,7 +541,8 @@ struct GemmParams

 // Validates self-consistency of GemmParams.
 template <typename AccumScalar, typename DstScalar, QuantizationFlavor quantization_flavor>
-void ValidateGemmParams(const GemmParams<AccumScalar, DstScalar, quantization_flavor> &params)
+void ValidateGemmParams(
+  [[maybe_unused]] const GemmParams<AccumScalar, DstScalar, quantization_flavor> &params)
 {
   // Guard consistency of the quantized multiplier fields.
   if (quantization_flavor == QuantizationFlavor::kFloatingPoint)
@@ -576,7 +577,6 @@ void ValidateGemmParams(const GemmParams
diff --git a/compute/cker/include/cker/operation/ArgMinMax.h b/compute/cker/include/cker/operation/ArgMinMax.h
--- a/compute/cker/include/cker/operation/ArgMinMax.h
+++ b/compute/cker/include/cker/operation/ArgMinMax.h
-void ArgMinMax(const Shape &input1_shape, const T1 *input1_data, const Shape &output_shape,
-               T2 *output_data, int32_t axis, const Cmp &cmp)
+void ArgMinMax(const Shape &input1_shape, const T1 *input1_data,
+               [[maybe_unused]] const Shape &output_shape, T2 *output_data, int32_t axis,
+               const Cmp &cmp)
 {
-  UNUSED_RELEASE(output_shape);
   assert(input1_shape.DimensionsCount() > 0);
   assert(input1_shape.DimensionsCount() - 1 == output_shape.DimensionsCount());
   if (axis < 0)
diff --git a/compute/cker/include/cker/operation/Concatenation.h b/compute/cker/include/cker/operation/Concatenation.h
index b73fa5c14b7..64399e60ffd 100644
--- a/compute/cker/include/cker/operation/Concatenation.h
+++ b/compute/cker/include/cker/operation/Concatenation.h
@@ -39,7 +39,7 @@ inline void Concatenation(const ConcatenationParams &params, const Shape *const
   const int concat_dimensions = output_shape.DimensionsCount();
   assert(axis < concat_dimensions);

-  int64_t concat_size = 0;
+  [[maybe_unused]] int64_t concat_size = 0;
   for (int i = 0; i < inputs_count; i++)
   {
     assert(input_shapes[i]->DimensionsCount() == concat_dimensions);
@@ -47,14 +47,12 @@ inline void Concatenation(const ConcatenationParams &params, const Shape *const
     {
       if (j != axis)
       {
-        auto dim_checked = MatchingDim(*input_shapes[i], j, output_shape, j);
-        UNUSED_RELEASE(dim_checked);
+        [[maybe_unused]] auto dim_checked = MatchingDim(*input_shapes[i], j, output_shape, j);
       }
     }
     concat_size += input_shapes[i]->Dims(axis);
   }
   assert(concat_size == output_shape.Dims(axis));
-  UNUSED_RELEASE(concat_size);
   int64_t outer_size = 1;
   for (int i = 0; i < axis; ++i)
   {
@@ -97,7 +95,7 @@ inline void ConcatenationWithScaling(const ConcatenationParams &params,
   const int concat_dimensions = output_shape.DimensionsCount();
   assert(axis <= concat_dimensions);

-  int64_t concat_size = 0;
+  [[maybe_unused]] int64_t concat_size = 0;
   for (int i = 0; i < inputs_count; i++)
   {
     assert(input_shapes[i]->DimensionsCount() == concat_dimensions);
@@ -111,7 +109,6 @@ inline void ConcatenationWithScaling(const ConcatenationParams &params,
     concat_size += input_shapes[i]->Dims(axis);
   }
   assert(concat_size == output_shape.Dims(axis));
-  UNUSED_RELEASE(concat_size);
   int64_t outer_size = 1;
   for (int i = 0; i < axis; ++i)
   {
diff --git a/compute/cker/include/cker/operation/FullyConnected.h b/compute/cker/include/cker/operation/FullyConnected.h
index 71a2f19efbb..fc62deb7ecf 100644
--- a/compute/cker/include/cker/operation/FullyConnected.h
+++ b/compute/cker/include/cker/operation/FullyConnected.h
@@ -128,14 +128,12 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu

 #endif // CKER_X86_PLATFORM

-inline void FullyConnected(const FullyConnectedParams &params, const Shape &input_shape,
-                           const uint8_t *input_data, const Shape &filter_shape,
-                           const uint8_t *filter_data, const Shape &bias_shape,
-                           const int32_t *bias_data, const Shape &output_shape,
-                           uint8_t *output_data)
+inline void FullyConnected(const FullyConnectedParams &params,
+                           [[maybe_unused]] const Shape &input_shape, const uint8_t *input_data,
+                           const Shape &filter_shape, const uint8_t *filter_data,
+                           [[maybe_unused]] const Shape &bias_shape, const int32_t *bias_data,
+                           const Shape &output_shape, uint8_t *output_data)
 {
-  UNUSED_RELEASE(input_shape);
-  UNUSED_RELEASE(bias_shape);
   const int32_t input_offset = params.input_offset;
   const int32_t filter_offset = params.weights_offset;
   const int32_t output_offset = params.output_offset;
@@ -185,8 +183,9 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
 inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape &input_shape,
                                  const float *input_data, const Shape &filter_shape,
                                  const int8_t *filter_data, const Shape &, const float *bias_data,
-                                 const Shape &output_shape, float *output_data,
-                                 FCTempArena &temp_arena, ruy::Context *ruy_context)
+                                 [[maybe_unused]] const Shape &output_shape, float *output_data,
+                                 FCTempArena &temp_arena,
+                                 [[maybe_unused]] ruy::Context *ruy_context)
 {
   int total_input_size = input_shape.FlatSize();
   const int input_size = filter_shape.Dims(1);
@@ -237,8 +236,6 @@ inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape
   MatrixBatchVectorMultiplyAccumulate(filter_data, num_units, input_size, quant_data,
                                       scaling_factors_ptr, batch_size, output_data,
                                       /*result_stride=*/1);
-  UNUSED_RELEASE(ruy_context);
-  UNUSED_RELEASE(output_shape);
 #endif

   // Apply activation function to floats.
@@ -250,16 +247,12 @@ inline void FullyConnectedHybrid(const FullyConnectedParams &params, const Shape
   return;
 }

-inline void FullyConnectedSparseWeightRandom(const FullyConnectedParams &params,
-                                             const Shape &input_shape, const float *input_data,
-                                             const Shape &weights_shape, const float *weights_data,
-                                             const Shape &bias_shape, const float *bias_data,
-                                             const Shape &output_shape, float *output_data,
-                                             const uint16_t *w1_segments,
-                                             const uint16_t *w1_indices)
+inline void FullyConnectedSparseWeightRandom(
+  const FullyConnectedParams &params, [[maybe_unused]] const Shape &input_shape,
+  const float *input_data, const Shape &weights_shape, const float *weights_data,
+  [[maybe_unused]] const Shape &bias_shape, const float *bias_data, const Shape &output_shape,
+  float *output_data, const uint16_t *w1_segments, const uint16_t *w1_indices)
 {
-  UNUSED_RELEASE(params);
-  UNUSED_RELEASE(input_shape);
   assert(weights_shape.DimensionsCount() == 2);
   assert(output_shape.DimensionsCount() == 2);

@@ -271,7 +264,6 @@ inline void FullyConnectedSparseWeightRandom(const FullyConnectedParams &params,
     MatchingDim(weights_shape, weights_dims_count - 2, output_shape, output_dims_count - 1);
   const int accum_depth = weights_shape.Dims(weights_dims_count - 1);

-  UNUSED_RELEASE(bias_shape);
   if (bias_data)
   {
     VectorBatchVectorAssign(bias_data, output_depth, batches, output_data);
diff --git a/compute/cker/include/cker/operation/FullyConnectedSparse16x1.h b/compute/cker/include/cker/operation/FullyConnectedSparse16x1.h
index df397f73ecf..60b7104fda4 100644
--- a/compute/cker/include/cker/operation/FullyConnectedSparse16x1.h
+++ b/compute/cker/include/cker/operation/FullyConnectedSparse16x1.h
@@ -54,15 +54,12 @@ namespace nnfw
 {
 namespace cker
 {
-inline void FullyConnectedSparseWeight16x1(const FullyConnectedParams &params,
-                                           const Shape &input_shape, const float *input_data,
-                                           const Shape &weights_shape, const float *weights_data,
-                                           const Shape &bias_shape, const float *bias_data,
-                                           const Shape &output_shape, float *output_data,
-                                           const uint16_t *w1_segments, const uint16_t *w1_indices)
+inline void FullyConnectedSparseWeight16x1(
+  const FullyConnectedParams &params, [[maybe_unused]] const Shape &input_shape,
+  const float *input_data, const Shape &weights_shape, const float *weights_data,
+  [[maybe_unused]] const Shape &bias_shape, const float *bias_data, const Shape &output_shape,
+  float *output_data, const uint16_t *w1_segments, const uint16_t *w1_indices)
 {
-  UNUSED_RELEASE(input_shape);
-
   assert(weights_shape.DimensionsCount() == 2);
   assert(output_shape.DimensionsCount() == 2);

@@ -73,7 +70,6 @@ inline void FullyConnectedSparseWeight16x1(const FullyConnectedParams &params,
     MatchingDim(weights_shape, weights_dims_count - 2, output_shape, output_dims_count - 1);
   const int accum_depth = weights_shape.Dims(weights_dims_count - 1);

-  UNUSED_RELEASE(bias_shape);
   if (bias_data)
   {
     VectorBatchVectorAssign(bias_data, output_depth, batches, output_data);
diff --git a/compute/cker/include/cker/operation/FusedBatchNorm.h b/compute/cker/include/cker/operation/FusedBatchNorm.h
index 8a97d842138..88d48fc0114 100644
--- a/compute/cker/include/cker/operation/FusedBatchNorm.h
+++ b/compute/cker/include/cker/operation/FusedBatchNorm.h
@@ -104,7 +104,7 @@ class FusedBatchNorm
     const int rest_size_minus_one = (rest_size > 1) ? (rest_size - 1) : 1;
     float rest_size_inv = static_cast<float>(1.0f / static_cast<float>(rest_size));
     // This adjustment is for Bessel's correction
-    float rest_size_adjust =
+    [[maybe_unused]] float rest_size_adjust =
       static_cast<float>(rest_size) / static_cast<float>(rest_size_minus_one);

     Eigen::Tensor<float, 1> batch_mean(depth);
@@ -124,8 +124,6 @@ class FusedBatchNorm
     auto x_shifted =
       (x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec)).template cast<float>();

-    UNUSED_RELEASE(rest_size_adjust);
-
     y.reshape(rest_by_depth).device(d) = x_shifted;
     memcpy(output_data, y.data(), output_shape.FlatSize() * sizeof(float));

diff --git a/compute/cker/include/cker/operation/InstanceNorm.h b/compute/cker/include/cker/operation/InstanceNorm.h
index 8fa8b03bc40..39ec17be412 100644
--- a/compute/cker/include/cker/operation/InstanceNorm.h
+++ b/compute/cker/include/cker/operation/InstanceNorm.h
@@ -29,9 +29,9 @@ namespace cker
 {

 inline void InstanceNorm(const InstanceNormParams &params, const Shape &input_shape,
-                         const float *input_data, const Shape &gamma_shape, const float *gamma_data,
-                         const Shape &beta_shape, const float *beta_data, const Shape &output_shape,
-                         float *output_data)
+                         const float *input_data, [[maybe_unused]] const Shape &gamma_shape,
+                         const float *gamma_data, [[maybe_unused]] const Shape &beta_shape,
+                         const float *beta_data, const Shape &output_shape, float *output_data)
 {
   const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
   const int32_t heights = MatchingDim(input_shape, 1, output_shape, 1);
@@ -40,8 +40,6 @@ inline void InstanceNorm(const InstanceNormParams &params, const Shape &input_sh
   const float output_activation_min = params.float_activation_min;
   const float output_activation_max = params.float_activation_max;

-  UNUSED_RELEASE(gamma_shape);
-  UNUSED_RELEASE(beta_shape);
   assert(output_activation_min <= output_activation_max);

   for (int32_t batch = 0; batch < batches; batch++)
diff --git a/compute/cker/include/cker/operation/ReduceMean.h b/compute/cker/include/cker/operation/ReduceMean.h
index 924e8503740..3a44655f922 100644
--- a/compute/cker/include/cker/operation/ReduceMean.h
+++ b/compute/cker/include/cker/operation/ReduceMean.h
@@ -211,7 +211,6 @@ template <typename In, typename Out>
 void Mean(const Shape &input_shape, const In *input_data, const Shape &output_shape,
           Out *output_data, const std::vector<int> &axes)
 {
-  UNUSED_RELEASE(output_shape);
   assert(input_shape.DimensionsCount() > 0);
   ReduceMean m_obj;
   m_obj.ReduceOp(input_shape, input_data, output_shape, output_data, axes, true, (Out)0,
@@ -223,7 +222,6 @@ void MeanQ8Asymm(const Shape &input_shape, const In *input_data, float input_sca
                  int32_t input_offset, const Shape &output_shape, Out *output_data,
                  float output_scale, int32_t output_offset, const std::vector<int> &axes)
 {
-  UNUSED_RELEASE(output_shape);
   assert(input_shape.DimensionsCount() > 0);
   ReduceMean m_obj;
   m_obj.ReduceOp(input_shape, input_data, input_scale, input_offset, output_shape,
@@ -235,7 +233,6 @@ template <typename In, typename Out>
 void MeanAxis1And2(const Shape &input_shape, const In *input_data, const Shape &output_shape,
                    Out *output_data)
 {
-  UNUSED_RELEASE(output_shape);
   assert(input_shape.DimensionsCount() == 4);
   assert(output_shape.DimensionsCount() == 4);

diff --git a/compute/cker/include/cker/operation/SpaceToBatchND.h b/compute/cker/include/cker/operation/SpaceToBatchND.h
index aff36e2f38f..aaa613132bb 100644
--- a/compute/cker/include/cker/operation/SpaceToBatchND.h
+++ b/compute/cker/include/cker/operation/SpaceToBatchND.h
@@ -27,15 +27,13 @@ namespace cker
 {

 template <typename T>
-inline void SpaceToBatchND(const SpaceToBatchParams &params, const Shape &unextended_input_shape,
-                           const T *input_data, const Shape &unextended_block_shape_shape,
-                           const int32_t *block_shape_data, const Shape &unextended_padding_shape,
-                           const int32_t *paddings_data, const Shape &unextended_output_shape,
-                           T *output_data)
+inline void
+SpaceToBatchND(const SpaceToBatchParams &params, const Shape &unextended_input_shape,
+               const T *input_data, [[maybe_unused]] const Shape &unextended_block_shape_shape,
+               const int32_t *block_shape_data,
+               [[maybe_unused]] const Shape &unextended_padding_shape, const int32_t *paddings_data,
+               const Shape &unextended_output_shape, T *output_data)
 {
-  UNUSED_RELEASE(unextended_block_shape_shape);
-  UNUSED_RELEASE(unextended_padding_shape);
-
   assert(unextended_input_shape.DimensionsCount() <= 4);
   assert(unextended_output_shape.DimensionsCount() <= 4);
   const Shape input_shape = Shape::ExtendedShape(4, unextended_input_shape);
diff --git a/compute/cker/include/cker/operation/SqDiff.h b/compute/cker/include/cker/operation/SqDiff.h
index 93428d5fd67..d230116ad50 100644
--- a/compute/cker/include/cker/operation/SqDiff.h
+++ b/compute/cker/include/cker/operation/SqDiff.h
@@ -63,7 +63,6 @@ template <typename T>
 void SqDiff(const Shape &input1_shape, const T *input1_data, const Shape &input2_shape,
             const T *input2_data, const Shape &output_shape, T *output_data)
 {
-  UNUSED_RELEASE(output_shape);
   assert(input1_shape.DimensionsCount() > 0 && input2_shape.DimensionsCount() > 0 &&
          output_shape.DimensionsCount() > 0);
   int outRank = output_shape.DimensionsCount();
diff --git a/compute/cker/include/cker/operation/StridedSlice.h b/compute/cker/include/cker/operation/StridedSlice.h
index ecb587d7e5d..894118a5e54 100644
--- a/compute/cker/include/cker/operation/StridedSlice.h
+++ b/compute/cker/include/cker/operation/StridedSlice.h
@@ -222,11 +222,9 @@ buildStridedSliceParams(const T *begin, const T *end, const T *strides, const ui
 }

 void checkOutputSize(const StridedSliceParams &op_params, const Shape &input_shape,
-                     const Shape &output_shape, uint32_t rank)
+                     [[maybe_unused]] const Shape &output_shape, uint32_t rank)
 {
-  UNUSED_RELEASE(output_shape);
-
-  int32_t shape_size = 0;
+  [[maybe_unused]] int32_t shape_size = 0;

   for (uint32_t idx = 0; idx < rank; ++idx)
   {
@@ -254,7 +252,6 @@ void checkOutputSize(const StridedSliceParams &op_params, const Shape &input_sha
   }

   assert(output_shape.DimensionsCount() == shape_size);
-  UNUSED_RELEASE(shape_size);
 }

 template
diff --git a/compute/cker/include/cker/operation/Transpose.h b/compute/cker/include/cker/operation/Transpose.h
index 52c826c396d..d7bd09107f2 100644
--- a/compute/cker/include/cker/operation/Transpose.h
+++ b/compute/cker/include/cker/operation/Transpose.h
@@ -294,12 +294,11 @@ size_t Flatten(const Shape &input_shape, const Shape &output_shape, const Transp
 // Perform transpose by transposing 4x4 blocks of the input, proceeding from
 // left to right (down the rows) of the input, and then from top to bottom.
 template <typename T>
-inline void Transpose2D(const Shape &input_shape, const T *input_data, const Shape &output_shape,
-                        T *output_data)
+inline void Transpose2D(const Shape &input_shape, const T *input_data,
+                        [[maybe_unused]] const Shape &output_shape, T *output_data)
 {
   assert(input_shape.DimensionsCount() == 2);
   assert(output_shape.DimensionsCount() == 2);
-  UNUSED_RELEASE(output_shape);

   const int d0 = input_shape.DimsData()[0];
   const int d1 = input_shape.DimsData()[1];
diff --git a/compute/cker/include/cker/operation/Unpack.h b/compute/cker/include/cker/operation/Unpack.h
index 242aadf46d3..c9d500dcb5a 100644
--- a/compute/cker/include/cker/operation/Unpack.h
+++ b/compute/cker/include/cker/operation/Unpack.h
@@ -28,7 +28,7 @@ namespace cker

 template <typename Scalar>
 void Unpack(const UnpackParams &params, const Shape &input_shape, const Scalar *input_data,
-            const Shape &output_shape, Scalar *const *output_datas)
+            [[maybe_unused]] const Shape &output_shape, Scalar *const *output_datas)
 {
   const int dimensions = input_shape.DimensionsCount();
   const int outputs_count = params.num_split;
@@ -44,7 +44,6 @@ void Unpack(const UnpackParams &params, const Shape &input_shape, const Scalar *
     copy_size *= input_shape.Dims(i);
   }
   assert(output_shape.FlatSize() == copy_size * outer_size);
-  UNUSED_RELEASE(output_shape);

   for (int i = 0; i < outputs_count; ++i)
   {
diff --git a/compute/cker/include/cker/operation/optimized/Conv.h b/compute/cker/include/cker/operation/optimized/Conv.h
index 6e0e129c600..6b6be8a9b95 100644
--- a/compute/cker/include/cker/operation/optimized/Conv.h
+++ b/compute/cker/include/cker/operation/optimized/Conv.h
@@ -81,9 +81,10 @@ inline void AddBiasAndEvalActivationFunction(float output_activation_min,
 }

 inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8_t *input_data,
-                 const Shape &filter_shape, const uint8_t *filter_data, const Shape &bias_shape,
-                 const int32_t *bias_data, const Shape &output_shape, uint8_t *output_data,
-                 const Shape &im2col_shape, uint8_t *im2col_data)
+                 const Shape &filter_shape, const uint8_t *filter_data,
+                 [[maybe_unused]] const Shape &bias_shape, const int32_t *bias_data,
+                 const Shape &output_shape, uint8_t *output_data, const Shape &im2col_shape,
+                 uint8_t *im2col_data)
 {
   gemmlowp::GemmContext *gemm_context = gemm_support::GetGemmLowpContext();

@@ -156,7 +157,6 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
   assert(output_cols == gemm_input_cols);
   assert(filter_cols == gemm_input_rows);
   assert(bias_shape.FlatSize() == output_rows);
-  UNUSED_RELEASE(bias_shape);
   gemmlowp::MatrixMap<const uint8_t, gemmlowp::MapOrder::RowMajor> filter_matrix(
     filter_data, filter_rows, filter_cols);
   gemmlowp::MatrixMap<const uint8_t, gemmlowp::MapOrder::ColMajor> input_matrix(
diff --git a/compute/cker/include/cker/operation/optimized/DepthwiseConvFloat.h b/compute/cker/include/cker/operation/optimized/DepthwiseConvFloat.h
index 17b2fc7a28b..e7f1a6a65a8 100644
--- a/compute/cker/include/cker/operation/optimized/DepthwiseConvFloat.h
+++ b/compute/cker/include/cker/operation/optimized/DepthwiseConvFloat.h
@@ -1032,11 +1032,10 @@ inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
 // means that it will calculate DepthwiseConv for output_data[:, 2:5, :, :].
 inline void DepthwiseConvImpl(const DepthwiseConvParams &params, const Shape &input_shape,
                               const float *input_data, const Shape &filter_shape,
-                              const float *filter_data, const Shape &bias_shape,
+                              const float *filter_data, [[maybe_unused]] const Shape &bias_shape,
                               const float *bias_data, const Shape &output_shape, float *output_data,
                               int thread_start, int thread_end, int thread_dim)
 {
-  UNUSED_RELEASE(bias_shape);
   const int stride_width = params.stride_width;
   const int stride_height = params.stride_height;
   const int pad_width = params.padding_values.width;
@@ -1067,13 +1066,11 @@ inline void DepthwiseConvImpl(const DepthwiseConvParams &params, const Shape &in
   float acc_buffer[kAccBufferMaxSize];
   assert(kAccBufferMaxSize >= output_depth);
   const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
-  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
+  [[maybe_unused]] const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
   assert(kOutputPixelsInAccBuffer * output_depth <= kAccBufferActualSize);
   assert(kAccBufferActualSize <= kAccBufferMaxSize);
   assert(kOutputPixelsInAccBuffer >= 1);

-  UNUSED_RELEASE(kAccBufferActualSize);
-
   // row_accum_func will point to the core accumulation function to be used
   // for this DepthwiseConv op.
   using row_accum_func_t = decltype(&FloatDepthwiseConvAccumRowGeneric);
diff --git a/compute/cker/include/cker/operation/optimized/DepthwiseConvUint8.h b/compute/cker/include/cker/operation/optimized/DepthwiseConvUint8.h
index 5ca56fd0945..aca59540ef4 100644
--- a/compute/cker/include/cker/operation/optimized/DepthwiseConvUint8.h
+++ b/compute/cker/include/cker/operation/optimized/DepthwiseConvUint8.h
@@ -1851,14 +1851,12 @@ inline void DepthwiseConvGeneral(const DepthwiseConvParams &params, const Shape
   int32_t acc_buffer[kAccBufferMaxSize];
   assert(kAccBufferMaxSize >= output_depth);
   const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
-  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
+  [[maybe_unused]] const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
   assert(kOutputPixelsInAccBuffer * output_depth <= kAccBufferActualSize);
   assert(kAccBufferActualSize <= kAccBufferMaxSize);
   assert(kOutputPixelsInAccBuffer >= 1);
   assert(thread_dim == 0 || thread_dim == 1);

-  UNUSED_RELEASE(kAccBufferActualSize);
-
   // row_accum_func will point to the core accumulation function to be used
   // for this DepthwiseConv op.
   using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
@@ -2159,30 +2157,22 @@ inline void DepthwiseConvWithRounding(const DepthwiseConvParams &params, const S
                                       uint8_t *output_data, int thread_start, int thread_end,
                                       int thread_dim)
 {
-  const int depth_multiplier = params.depth_multiplier;
-  const int32_t output_activation_min = params.quantized_activation_min;
-  const int32_t output_activation_max = params.quantized_activation_max;
-  const int dilation_width_factor = params.dilation_width_factor;
-  const int dilation_height_factor = params.dilation_height_factor;
+  [[maybe_unused]] const int depth_multiplier = params.depth_multiplier;
+  [[maybe_unused]] const int32_t output_activation_min = params.quantized_activation_min;
+  [[maybe_unused]] const int32_t output_activation_max = params.quantized_activation_max;
+  [[maybe_unused]] const int dilation_width_factor = params.dilation_width_factor;
+  [[maybe_unused]] const int dilation_height_factor = params.dilation_height_factor;
   assert(dilation_width_factor >= 1);
   assert(dilation_height_factor >= 1);
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
   assert(output_shape.DimensionsCount() == 4);
   assert(output_activation_min <= output_activation_max);
-  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
-  const int input_depth = input_shape.Dims(3);
+  [[maybe_unused]] const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
+  [[maybe_unused]] const int input_depth = input_shape.Dims(3);
   assert(output_depth == input_depth * depth_multiplier);
   assert(bias_shape.FlatSize() == output_depth);
-  UNUSED_RELEASE(depth_multiplier);
-  UNUSED_RELEASE(output_activation_min);
-  UNUSED_RELEASE(output_activation_max);
-  UNUSED_RELEASE(dilation_width_factor);
-  UNUSED_RELEASE(dilation_height_factor);
-  UNUSED_RELEASE(output_depth);
-  UNUSED_RELEASE(input_depth);
-
   // Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
   // Jetson TX-2. This compiler does not support the offsetof() macro.
 #if defined(__aarch64__) && !defined(GOOGLE_L4T)
diff --git a/compute/cker/include/cker/operation/optimized/integer_ops/DepthwiseConvInt8.h b/compute/cker/include/cker/operation/optimized/integer_ops/DepthwiseConvInt8.h
index bd84979202d..ace682a317d 100644
--- a/compute/cker/include/cker/operation/optimized/integer_ops/DepthwiseConvInt8.h
+++ b/compute/cker/include/cker/operation/optimized/integer_ops/DepthwiseConvInt8.h
@@ -1769,8 +1769,7 @@ inline void DepthwiseConvGeneral(const DepthwiseConvParams &params,
   int32_t acc_buffer[kAccBufferMaxSize];
   assert(kAccBufferMaxSize >= output_depth);
   const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
-  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
-  UNUSED_RELEASE(kAccBufferActualSize);
+  [[maybe_unused]] const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
   assert(kOutputPixelsInAccBuffer * output_depth <= kAccBufferActualSize);
   assert(kAccBufferActualSize <= kAccBufferMaxSize);
   assert(kOutputPixelsInAccBuffer >= 1);
@@ -1924,21 +1923,16 @@ inline void DepthwiseConvWithRounding(const DepthwiseConvParams &params,
                                       const Shape &output_shape, int8_t *output_data,
                                       int thread_start, int thread_end, int thread_dim)
 {
-  const int depth_multiplier = params.depth_multiplier;
-  const int dilation_width_factor = params.dilation_width_factor;
-  const int dilation_height_factor = params.dilation_height_factor;
-  UNUSED_RELEASE(depth_multiplier);
-  UNUSED_RELEASE(dilation_width_factor);
-  UNUSED_RELEASE(dilation_height_factor);
+  [[maybe_unused]] const int depth_multiplier = params.depth_multiplier;
+  [[maybe_unused]] const int dilation_width_factor = params.dilation_width_factor;
+  [[maybe_unused]] const int dilation_height_factor = params.dilation_height_factor;
   assert(dilation_width_factor >= 1);
   assert(dilation_height_factor >= 1);
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
   assert(output_shape.DimensionsCount() == 4);
-  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
-  const int input_depth = input_shape.Dims(3);
-  UNUSED_RELEASE(output_depth);
-  UNUSED_RELEASE(input_depth);
+  [[maybe_unused]] const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
+  [[maybe_unused]] const int input_depth = input_shape.Dims(3);
   assert(output_depth == input_depth * depth_multiplier);
   assert(bias_shape.FlatSize() == output_depth);

diff --git a/compute/cker/include/cker/operation/reference/Conv.h b/compute/cker/include/cker/operation/reference/Conv.h
index e316083a55c..71b7d59a7ca 100644
--- a/compute/cker/include/cker/operation/reference/Conv.h
+++ b/compute/cker/include/cker/operation/reference/Conv.h
@@ -31,8 +31,9 @@ namespace reference
 {

 inline void Conv(const ConvParams &params, const Shape &input_shape, const float *input_data,
-                 const Shape &filter_shape, const float *filter_data, const Shape &bias_shape,
-                 const float *bias_data, const Shape &output_shape, float *output_data)
+                 const Shape &filter_shape, const float *filter_data,
+                 [[maybe_unused]] const Shape &bias_shape, const float *bias_data,
+                 const Shape &output_shape, float *output_data)
 {
   const int stride_width = params.stride_width;
   const int stride_height = params.stride_height;
@@ -45,7 +46,6 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const float
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
   assert(output_shape.DimensionsCount() == 4);
-  UNUSED_RELEASE(bias_shape);
   const int batches = MatchingDim(input_shape, 0, output_shape, 0);
   const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);

@@ -107,8 +107,9 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const float
 }

 inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8_t *input_data,
-                 const Shape &filter_shape, const uint8_t *filter_data, const Shape &bias_shape,
-                 const int32_t *bias_data, const Shape &output_shape, uint8_t *output_data)
+                 const Shape &filter_shape, const uint8_t *filter_data,
+                 [[maybe_unused]] const Shape &bias_shape, const int32_t *bias_data,
+                 const Shape &output_shape, uint8_t *output_data)
 {
   const int stride_width = params.stride_width;
   const int stride_height = params.stride_height;
@@ -128,7 +129,6 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
   assert(output_shape.DimensionsCount() == 4);
-  UNUSED_RELEASE(bias_shape);
   const int batches = MatchingDim(input_shape, 0, output_shape, 0);
   const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
   const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
@@ -191,14 +191,13 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
 }

 template <typename T>
-inline void Conv(const ConvParams &params, const int32_t *output_multiplier,
-                 const int32_t *output_shift, const Shape &input_shape, const T *input_data,
-                 const Shape &filter_shape, const T *filter_data, const int32_t *filter_zeropoint,
-                 const Shape &bias_shape, const int32_t *bias_data, const Shape &output_shape,
-                 T *output_data)
+inline void
+Conv(const ConvParams &params, const int32_t *output_multiplier, const int32_t *output_shift,
+     const Shape &input_shape, const T *input_data, const Shape &filter_shape, const T *filter_data,
+     [[maybe_unused]] const int32_t *filter_zeropoint, [[maybe_unused]] const Shape &bias_shape,
+     const int32_t *bias_data, const Shape &output_shape, T *output_data)
 {
-  UNUSED_RELEASE(bias_shape);
   // Get parameters.
   const int32_t input_offset = params.input_offset; // r = s(q - Z)
   const int stride_width = params.stride_width;
@@ -289,7 +288,6 @@ inline void Conv(const ConvParams &params, const int32_t *output_multiplier,
             // TODO(jianlijianli): Add a check to make sure the
             // accumulator depth is smaller than 2^16.
             acc += filter_val * (input_val + input_offset);
-            UNUSED_RELEASE(filter_zeropoint);
           }
         }
       }
@@ -316,7 +314,7 @@ inline void Conv(const ConvParams &params, const int32_t *output_multiplier,

 inline void HybridConvPerChannel(const ConvParams &params, float *scaling_factors_ptr,
                                  const Shape &input_shape, const int8_t *input_data,
                                  const Shape &filter_shape, const int8_t *filter_data,
-                                 const Shape &bias_shape, const float *bias_data,
+                                 [[maybe_unused]] const Shape &bias_shape, const float *bias_data,
                                  const Shape &output_shape, float *output_data,
                                  const float *per_channel_scale, const int32_t *input_offset)
@@ -338,7 +336,6 @@ inline void HybridConvPerChannel(const ConvParams &params, float *scaling_factor
   if (bias_data)
   {
     assert(bias_shape.FlatSize() == output_depth);
-    UNUSED_RELEASE(bias_shape);
   }
   const int input_height = input_shape.Dims(1);
   const int input_width = input_shape.Dims(2);
diff --git a/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvHybrid.h b/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvHybrid.h
index 9fc58ad3b1c..fabc166f286 100644
--- a/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvHybrid.h
+++ b/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvHybrid.h
@@ -53,7 +53,7 @@ inline void DepthwiseConvHybridPerChannel(const DepthwiseConvParams &params,
   assert(output_shape.DimensionsCount() == 4);

   const int batches = MatchingDim(input_shape, 0, output_shape, 0);
-  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
+  [[maybe_unused]] const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
   const int input_height = input_shape.Dims(1);
   const int input_width = input_shape.Dims(2);
   const int input_depth = input_shape.Dims(3);
@@ -62,8 +62,6 @@ inline void DepthwiseConvHybridPerChannel(const DepthwiseConvParams &params,
   const int output_height = output_shape.Dims(1);
   const int output_width = output_shape.Dims(2);
   const int bias_depth = bias_shape.FlatSize();
-  UNUSED_RELEASE(output_depth);
-  UNUSED_RELEASE(bias_shape);
   assert(output_depth == input_depth * depth_multiplier);
   assert(bias_depth == output_depth);

diff --git a/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvUInt8.h b/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvUInt8.h
index 025e4070535..40e7b35eb93 100644
--- a/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvUInt8.h
+++ b/compute/cker/include/cker/operation/reference/integer_ops/DepthwiseConvUInt8.h
@@ -32,7 +32,8 @@ inline void DepthwiseConvPerChannel(const DepthwiseConvParams &params,
                                     const int32_t *output_multiplier, const int32_t *output_shift,
                                     const Shape &input_shape, const uint8_t *input_data,
                                     const Shape &filter_shape, const uint8_t *filter_data,
-                                    const int32_t *filter_zeropoint, const Shape &bias_shape,
+                                    const int32_t *filter_zeropoint,
+                                    [[maybe_unused]] const Shape &bias_shape,
                                     const int32_t *bias_data, const Shape &output_shape,
                                     uint8_t *output_data)
 {
@@ -57,7 +58,7 @@ inline void DepthwiseConvPerChannel(const DepthwiseConvParams &params,
   assert(output_activation_min <= output_activation_max);

   const int batches = MatchingDim(input_shape, 0, output_shape, 0);
-  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
+  [[maybe_unused]] const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
   const int input_height = input_shape.Dims(1);
   const int input_width = input_shape.Dims(2);
   const int input_depth = input_shape.Dims(3);
@@ -65,8 +66,6 @@ inline void DepthwiseConvPerChannel(const DepthwiseConvParams &params,
   const int filter_width = filter_shape.Dims(2);
   const int output_height = output_shape.Dims(1);
   const int output_width = output_shape.Dims(2);
-  UNUSED_RELEASE(output_depth);
-  UNUSED_RELEASE(bias_shape);
   assert(output_depth == input_depth * depth_multiplier);
   assert(bias_shape.FlatSize() == output_depth);