From 21fb7915474d9ce2cc005aded6834ab2a350f636 Mon Sep 17 00:00:00 2001
From: chachaleo
Date: Tue, 27 Feb 2024 06:05:46 +0100
Subject: [PATCH] fix: change type of I from Option<Tensor<i32>> to Option<Tensor<usize>>

---
 .../operators/neural-network/nn.max_pool.md   |  6 +-
 src/operators/nn/core.cairo                   |  8 +--
 src/operators/nn/functional/common_pool.cairo |  5 +-
 src/operators/nn/functional/max_pool.cairo    | 61 ++++++++++---------
 .../nn/implementations/nn_fp16x16.cairo       |  4 +-
 .../nn/implementations/nn_fp32x32.cairo       | 17 ++++--
 .../nn/implementations/nn_fp64x64.cairo       | 17 ++++--
 .../nn/implementations/nn_fp8x23.cairo        |  4 +-
 src/operators/nn/implementations/nn_i32.cairo |  2 +-
 src/operators/nn/implementations/nn_i8.cairo  |  3 +-
 src/operators/nn/implementations/nn_u32.cairo |  3 +-
 11 files changed, 69 insertions(+), 61 deletions(-)

diff --git a/docs/framework/operators/neural-network/nn.max_pool.md b/docs/framework/operators/neural-network/nn.max_pool.md
index eb21f4c8c..e3ad8d84e 100644
--- a/docs/framework/operators/neural-network/nn.max_pool.md
+++ b/docs/framework/operators/neural-network/nn.max_pool.md
@@ -12,7 +12,7 @@
     storage_order: Option<usize>,
     strides: Option<Span<usize>>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>);
+) -> (Tensor<T>, Option<Tensor<usize>>);
 ```
 
 MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.
@@ -32,7 +32,7 @@ MaxPool consumes an input tensor X and applies max pooling across the tensor acc
 ## Returns
 
 A `Tensor<T>` that contains the result of the max pool.
-A `Option<Tensor<i32>>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor.
+A `Option<Tensor<usize>>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor.
 
 ## Examples
 
 ```rust
 use core::array::{ArrayTrait, SpanTrait};
 use orion::operators::nn::NNTrait;
 use orion::numbers::FixedTrait;
 use orion::operators::nn::FP16x16NN;
 use orion::numbers::FP16x16;
 use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
 
-fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
     let mut shape = ArrayTrait::<usize>::new();
     shape.append(1);
     shape.append(1);
diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo
index 032880942..d74b43f6d 100644
--- a/src/operators/nn/core.cairo
+++ b/src/operators/nn/core.cairo
@@ -1319,7 +1319,7 @@ trait NNTrait<T> {
     ///     storage_order: Option<usize>,
     ///     strides: Option<Span<usize>>,
     ///     output_len: usize,
-    /// ) -> (Tensor<T>, Option<Tensor<i32>>);
+    /// ) -> (Tensor<T>, Option<Tensor<usize>>);
     /// ```
     ///
     /// MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized.
@@ -1339,7 +1339,7 @@ trait NNTrait<T> {
     /// ## Returns
     ///
     /// A `Tensor<T>` that contains the result of the max pool.
-    /// A `Option<Tensor<i32>>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor.
+    /// A `Option<Tensor<usize>>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor.
     /// ## Examples
     ///
     /// ```rust
@@ -1350,7 +1350,7 @@ trait NNTrait<T> {
     /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
     ///
     ///
-    /// fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+    /// fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
     ///     let mut shape = ArrayTrait::<usize>::new();
     ///     shape.append(1);
     ///     shape.append(1);
@@ -1424,5 +1424,5 @@ trait NNTrait<T> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<T>, Option<Tensor<i32>>);
+    ) -> (Tensor<T>, Option<Tensor<usize>>);
 }
diff --git a/src/operators/nn/functional/common_pool.cairo b/src/operators/nn/functional/common_pool.cairo
index 02d8826ce..70bde406a 100644
--- a/src/operators/nn/functional/common_pool.cairo
+++ b/src/operators/nn/functional/common_pool.cairo
@@ -11,7 +11,6 @@ use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number};
 use orion::numbers::FP16x16;
 use orion::operators::nn::{AUTO_PAD, POOLING_TYPE};
 
-
 fn common_pool<
     T,
     MAG,
@@ -29,7 +28,6 @@ fn common_pool<
     +PartialEq<T>,
     +TryInto<T, usize>,
     +Into<usize, MAG>,
-    +Into<i32, MAG>,
     +Rem<T>,
     +Neg<T>,
     +SubEq<T>,
@@ -44,7 +42,7 @@ fn common_pool<
     pads: Option<Span<usize>>,
     strides: Option<Span<usize>>,
     p: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     let padding_value: T = match pooling_type {
         POOLING_TYPE::AVG => {
             let padding_value = if count_include_pad == 0 {
@@ -188,7 +186,6 @@ fn pool<
     +PartialEq<T>,
     +TryInto<T, usize>,
     +Into<usize, MAG>,
-    +Into<i32, MAG>,
     +Rem<T>,
     +Neg<T>,
     +SubEq<T>,
diff --git a/src/operators/nn/functional/max_pool.cairo b/src/operators/nn/functional/max_pool.cairo
index 69e060a2b..c6fc8b5c8 100644
--- a/src/operators/nn/functional/max_pool.cairo
+++ b/src/operators/nn/functional/max_pool.cairo
@@ -30,7 +30,6 @@ fn max_pool<
     +PartialEq<T>,
     +TryInto<T, usize>,
     +Into<usize, MAG>,
-    +Into<i32, MAG>,
     +Rem<T>,
     +Neg<T>,
     +SubEq<T>,
@@ -44,7 +43,7 @@
     storage_order: Option<usize>,
     strides: Option<Span<usize>>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     match dilations {
         Option::Some(dilations) => {
             if (min(dilations) != max(dilations) || min(dilations) != 1) {
@@ -173,7 +172,6 @@ fn max_pool_implementation<
     +PartialEq<T>,
     +TryInto<T, usize>,
     +Into<usize, MAG>,
-    +Into<i32, MAG>,
     +Rem<T>,
     +Neg<T>,
     +SubEq<T>,
@@ -187,7 +185,7 @@
     storage_order: Option<usize>,
     strides: Option<Span<usize>>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     assert((*X).shape.len() >= 3, 'X must have at least 3 dim');
 
     let n_dims = kernel_shape.len();
@@ -469,7 +467,7 @@ fn max_pool_1d<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop
     strides: Span<usize>,
     output_spatial_shape: Span<usize>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     let mut y_dims = ArrayTrait::new();
     y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
     y_dims.append_span(output_spatial_shape);
@@ -523,14 +521,14 @@ fn max_pool_1d<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop
             };
 
             Y_data.append(Yh);
-            I_data.append((c * x_step).into() + h_index);
+            I_data.append((c * x_step) + h_index.into());
             ph += 1;
         };
         c += 1;
     };
 
     if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
     }
     return (
         TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -559,7 +557,7 @@ fn max_pool_2d<
     strides: Span<usize>,
     output_spatial_shape: Span<usize>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     let mut y_dims = ArrayTrait::new();
     y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
     y_dims.append_span(output_spatial_shape);
@@ -651,9 +649,9 @@ fn max_pool_2d<
                     if Yh != NumberTrait::<T>::min_value() {
                         Y_data.append(Yh);
                         if storage_order == 0 {
-                            I_data.append((c * x_step).into() + h_index * W.into() + w_index);
+                            I_data.append((c * x_step) + h_index.into() * W + w_index.into());
                         } else {
-                            I_data.append((c * x_step).into() + h_index + w_index * H.into());
+                            I_data.append((c * x_step) + h_index.into() + w_index.into() * H);
                         }
                     }
                     pw += 1;
@@ -664,7 +662,7 @@
     };
 
     if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
     }
     return (
         TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -693,7 +691,7 @@ fn max_pool_3d<
     strides: Span<usize>,
     output_spatial_shape: Span<usize>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     let mut y_dims = ArrayTrait::new();
     y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
     y_dims.append_span(output_spatial_shape);
@@ -813,18 +811,18 @@ fn max_pool_3d<
                             if storage_order == 0 {
                                 I_data
                                     .append(
-                                        (c * x_step).into()
-                                            + h_index * W.into() * D.into()
-                                            + w_index * D.into()
-                                            + d_index
+                                        (c * x_step)
+                                            + h_index.into() * W * D
+                                            + w_index.into() * D
+                                            + d_index.into()
                                     );
                             } else {
                                 I_data
                                     .append(
-                                        (c * x_step).into()
-                                            + h_index
-                                            + w_index * H.into()
-                                            + d_index * H.into() * W.into()
+                                        (c * x_step)
+                                            + h_index.into()
+                                            + w_index.into() * H
+                                            + d_index.into() * H * W
                                     );
                             }
                             pd += 1;
@@ -837,7 +835,7 @@
     };
 
     if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
     }
     return (
         TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -857,7 +855,7 @@ fn max_pool_nd<
     +PartialEq<T>,
     +PrintTrait<T>,
     +TryInto<T, usize>,
-    +Into<i32, MAG>,
+    +Into<usize, MAG>,
     +Div<T>
 >(
     X: @Tensor<T>,
@@ -870,7 +868,7 @@
     strides: Span<usize>,
     output_spatial_shape: Span<usize>,
     output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
     let nd = (*X).shape.len() - 2;
 
     let mut y_dims = ArrayTrait::new();
@@ -938,8 +936,10 @@ fn max_pool_nd<
             nstart.append(ns);
             nend.append(ns + *ks_n.at(n) * *dilation_n.at(n));
 
-            let a: T = NumberTrait::new_unscaled(((*nend.at(n) - ns)).into(), false);
-            let b: T = NumberTrait::new_unscaled((*dilation_n.at(n)).into(), false);
+            let a: T = NumberTrait::new_unscaled(
+                (*kernel_shape.at(n) * *dilations.at(n)).into(), false
+            );
+            let b: T = NumberTrait::new_unscaled((*dilations.at(n)).into(), false);
             nstep.append(NumberTrait::ceil(a / b).try_into().unwrap());
             n += 1;
         };
@@ -1004,7 +1004,7 @@ fn max_pool_nd<
                     index += *n_index.at(n) * (*x_stride.at(2 + n)).into();
                     n += 1;
                 };
-                I_data.append((c * x_step).into() + index);
+                I_data.append((c * x_step) + index.into());
             } else {
                 let mut index = 0;
                 let mut n = nd;
@@ -1015,16 +1015,19 @@ fn max_pool_nd<
                     index += *n_index.at(n - 1) * (*i_stride_storage_order_1.at(nd - n)).into();
                     n -= 1;
                 };
-                I_data.append((c * x_step).into() + index);
+                I_data.append((c * x_step) + index.into());
             }
             p += 1;
         };
 
         c += 1;
     };
 
     if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
     }
-    return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+    return (
+        TensorTrait::new(y_dims.span(), Y_data.span()),
+        Option::Some(TensorTrait::new(y_dims.span(), I_data.span()))
+    );
 }
diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo
index aa6a7b6ce..24d631a21 100644
--- a/src/operators/nn/implementations/nn_fp16x16.cairo
+++ b/src/operators/nn/implementations/nn_fp16x16.cairo
@@ -13,8 +13,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
 use orion::operators::tensor::implementations::tensor_fp16x16wide::{
     FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd
 };
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl FP16x16NN of NNTrait<FP16x16> {
@@ -159,7 +157,7 @@ impl FP16x16NN of NNTrait<FP16x16> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+    ) -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
         functional::max_pool::max_pool(
             X,
             auto_pad,
diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo
index 3054dd1ad..c14e9f544 100644
--- a/src/operators/nn/implementations/nn_fp32x32.cairo
+++ b/src/operators/nn/implementations/nn_fp32x32.cairo
@@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x
 use orion::operators::tensor::implementations::tensor_fp32x32::{
     FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd
 };
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl FP32x32NN of NNTrait<FP32x32> {
@@ -153,8 +151,17 @@ impl FP32x32NN of NNTrait<FP32x32> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<FP32x32>, Option<Tensor<i32>>) {
-        //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len)
-        panic(array!['not supported!'])
+    ) -> (Tensor<FP32x32>, Option<Tensor<usize>>) {
+        functional::max_pool::max_pool(
+            X,
+            auto_pad,
+            ceil_mode,
+            dilations,
+            kernel_shape,
+            pads,
+            storage_order,
+            strides,
+            output_len
+        )
     }
 }
diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo
index a378cfb70..c40f6f636 100644
--- a/src/operators/nn/implementations/nn_fp64x64.cairo
+++ b/src/operators/nn/implementations/nn_fp64x64.cairo
@@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x
 use orion::operators::tensor::implementations::tensor_fp64x64::{
     FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd
 };
-//use orion::numbers::I32IntoU64;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl FP64x64NN of NNTrait<FP64x64> {
@@ -153,8 +151,17 @@ impl FP64x64NN of NNTrait<FP64x64> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<FP64x64>, Option<Tensor<i32>>) {
-        //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len)
-        panic(array!['not supported!'])
+    ) -> (Tensor<FP64x64>, Option<Tensor<usize>>) {
+        functional::max_pool::max_pool(
+            X,
+            auto_pad,
+            ceil_mode,
+            dilations,
+            kernel_shape,
+            pads,
+            storage_order,
+            strides,
+            output_len
+        )
     }
 }
diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo
index add955fd9..73cea38fa 100644
--- a/src/operators/nn/implementations/nn_fp8x23.cairo
+++ b/src/operators/nn/implementations/nn_fp8x23.cairo
@@ -11,8 +11,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
     FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W
 };
 use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor};
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl FP8x23NN of NNTrait<FP8x23> {
@@ -155,7 +153,7 @@ impl FP8x23NN of NNTrait<FP8x23> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<FP8x23>, Option<Tensor<i32>>) {
+    ) -> (Tensor<FP8x23>, Option<Tensor<usize>>) {
         functional::max_pool::max_pool(
             X,
             auto_pad,
diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo
index 0156fc5f5..0532b74eb 100644
--- a/src/operators/nn/implementations/nn_i32.cairo
+++ b/src/operators/nn/implementations/nn_i32.cairo
@@ -144,7 +144,7 @@ impl I32NN of NNTrait<i32> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<i32>, Option<Tensor<i32>>) {
+    ) -> (Tensor<i32>, Option<Tensor<usize>>) {
         panic(array!['not supported!'])
     }
 }
diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo
index 284dd5ee1..dc56224bc 100644
--- a/src/operators/nn/implementations/nn_i8.cairo
+++ b/src/operators/nn/implementations/nn_i8.cairo
@@ -4,7 +4,6 @@ use orion::operators::tensor::core::Tensor;
 use orion::operators::nn::core::NNTrait;
 use orion::operators::nn::functional;
 use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd};
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl I8NN of NNTrait<i8> {
@@ -145,7 +144,7 @@ impl I8NN of NNTrait<i8> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<i8>, Option<Tensor<i32>>) {
+    ) -> (Tensor<i8>, Option<Tensor<usize>>) {
         panic(array!['not supported!'])
     }
 }
diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo
index 1ebfb3bec..f061b90ae 100644
--- a/src/operators/nn/implementations/nn_u32.cairo
+++ b/src/operators/nn/implementations/nn_u32.cairo
@@ -4,7 +4,6 @@ use orion::operators::tensor::core::Tensor;
 use orion::operators::nn::core::NNTrait;
 use orion::operators::nn::functional;
 use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd};
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
 use orion::operators::nn::AUTO_PAD;
 
 impl U32NN of NNTrait<u32> {
@@ -145,7 +144,7 @@ impl U32NN of NNTrait<u32> {
         storage_order: Option<usize>,
         strides: Option<Span<usize>>,
         output_len: usize,
-    ) -> (Tensor<u32>, Option<Tensor<i32>>) {
+    ) -> (Tensor<u32>, Option<Tensor<usize>>) {
         panic(array!['not supported!'])
    }
 }
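
For context, a minimal sketch of how the patched `max_pool` entry point is consumed once this change lands. It is not part of the commit: the `FP16x16` implementation and the imports mirror the docs example above, while the function name `example_max_pool_usage`, the 1x1x4x4 input values, and the 2x2 kernel are illustrative assumptions.

```cairo
use core::array::{ArrayTrait, SpanTrait};
use orion::operators::nn::NNTrait;
use orion::numbers::FixedTrait;
use orion::operators::nn::FP16x16NN;
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};

// Indices now come back as Option<Tensor<usize>>, so they can index the
// flattened input directly, without the former i32 -> u32 conversion.
fn example_max_pool_usage() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
    // Illustrative 1x1x4x4 input holding the values 0..15 in row-major order.
    let mut shape = ArrayTrait::<usize>::new();
    shape.append(1);
    shape.append(1);
    shape.append(4);
    shape.append(4);

    let mut data: Array<FP16x16> = ArrayTrait::new();
    let mut i: u32 = 0;
    loop {
        if i == 16 {
            break;
        }
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    let x = TensorTrait::new(shape.span(), data.span());

    // Illustrative 2x2 kernel; strides, pads and dilations keep their defaults.
    let mut kernel_shape = ArrayTrait::<usize>::new();
    kernel_shape.append(2);
    kernel_shape.append(2);

    NNTrait::max_pool(
        @x,
        Option::None, // auto_pad
        Option::None, // ceil_mode
        Option::None, // dilations
        kernel_shape.span(),
        Option::None, // pads
        Option::None, // storage_order (row-major by default)
        Option::None, // strides
        2 // output_len: any value other than 1 requests the indices tensor
    )
}
```

Per the hunks above, `output_len == 1` short-circuits every variant to `Option::<Tensor<usize>>::None`, so callers that only need the pooled values never materialize the indices tensor.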