Commit

fix: change type of I from Option<Tensor<i32>> to Option<Tensor<usize>>
chachaleo committed Feb 27, 2024
1 parent eeceae9 commit 21fb791
Showing 11 changed files with 69 additions and 61 deletions.
6 changes: 3 additions & 3 deletions docs/framework/operators/neural-network/nn.max_pool.md
@@ -12,7 +12,7 @@
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>);
+) -> (Tensor<T>, Option<Tensor<usize>>);
```

MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values in a kernel-sized subset of the input tensor and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding (pads) or auto padding (auto_pad) is used; a sketch of the shape rule follows this file's diff.
@@ -32,7 +32,7 @@ MaxPool consumes an input tensor X and applies max pooling across the tensor acc
## Returns

A `Tensor<T>` that contains the result of the max pool.
-An `Option<Tensor<i32>>` with the indices tensor from max pooling across the input tensor. The dimensions of the indices are the same as the output tensor.
+An `Option<Tensor<usize>>` with the indices tensor from max pooling across the input tensor. The dimensions of the indices are the same as the output tensor.
## Examples

```rust
@@ -43,7 +43,7 @@ use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};


-fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
let mut shape = ArrayTrait::<usize>::new();
shape.append(1);
shape.append(1);
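The description above leaves the output-shape rule implicit. As a point of reference, here is a minimal sketch of the standard ONNX formula it alludes to, written in plain Rust rather than the library's Cairo; the function name and signature are illustrative assumptions, not Orion API:

```rust
// Illustrative only: ONNX-style output size per spatial axis, assuming
// explicit pads (auto_pad = NOTSET).
fn output_spatial_dim(
    input: usize,     // input size along this axis
    kernel: usize,    // kernel size along this axis
    stride: usize,
    dilation: usize,
    pad_total: usize, // pad_begin + pad_end
    ceil_mode: bool,
) -> usize {
    // Span covered by one placement of the dilated kernel.
    let effective_kernel = (kernel - 1) * dilation + 1;
    let reach = input + pad_total - effective_kernel;
    let out = reach / stride + 1; // usize division floors
    // ceil_mode adds one output for a trailing partial window.
    if ceil_mode && reach % stride != 0 { out + 1 } else { out }
}

fn main() {
    // A 4-wide axis, kernel 3, stride 1, no padding -> 2 output positions.
    assert_eq!(output_spatial_dim(4, 3, 1, 1, 0, false), 2);
    // Stride 2 with ceil_mode keeps the trailing partial window.
    assert_eq!(output_spatial_dim(4, 3, 2, 1, 0, true), 2);
}
```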
8 changes: 4 additions & 4 deletions src/operators/nn/core.cairo
@@ -1319,7 +1319,7 @@ trait NNTrait<T> {
/// storage_order: Option<usize>,
/// strides: Option<Span<usize>>,
/// output_len: usize,
-/// ) -> (Tensor<T>, Option<Tensor<i32>>);
+/// ) -> (Tensor<T>, Option<Tensor<usize>>);
/// ```
///
/// MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. Max pooling consists of computing the max over all values in a kernel-sized subset of the input tensor and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding (pads) or auto padding (auto_pad) is used.
@@ -1339,7 +1339,7 @@ trait NNTrait<T> {
/// ## Returns
///
/// A `Tensor<T>` that contains the result of the max pool.
-/// An `Option<Tensor<i32>>` with the indices tensor from max pooling across the input tensor. The dimensions of the indices are the same as the output tensor.
+/// An `Option<Tensor<usize>>` with the indices tensor from max pooling across the input tensor. The dimensions of the indices are the same as the output tensor.
/// ## Examples
///
/// ```rust
@@ -1350,7 +1350,7 @@ trait NNTrait<T> {
/// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
///
///
-/// fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+/// fn example_max_pool() -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(1);
/// shape.append(1);
@@ -1424,5 +1424,5 @@ trait NNTrait<T> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>);
+) -> (Tensor<T>, Option<Tensor<usize>>);
}
5 changes: 1 addition & 4 deletions src/operators/nn/functional/common_pool.cairo
@@ -11,7 +11,6 @@ use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number};
use orion::numbers::FP16x16;
use orion::operators::nn::{AUTO_PAD, POOLING_TYPE};


fn common_pool<
T,
MAG,
@@ -29,7 +28,6 @@
+PartialEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
-    +Into<i32, MAG>,
+Rem<T>,
+Neg<T>,
+SubEq<T>,
@@ -44,7 +42,7 @@
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
p: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
let padding_value: T = match pooling_type {
POOLING_TYPE::AVG => {
let padding_value = if count_include_pad == 0 {
@@ -188,7 +186,6 @@ fn pool<
+PartialEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
-    +Into<i32, MAG>,
+Rem<T>,
+Neg<T>,
+SubEq<T>,
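A side note on the AVG branch visible at the top of this hunk: `count_include_pad` controls whether padded cells participate in the average. A hedged Rust sketch of the usual ONNX AveragePool semantics this selects between; the helper is a stand-in, not the library's API:

```rust
// Stand-in sketch: average a single pooling window. `values` holds the
// window cells that landed inside the input; `pad_cells` counts cells
// that landed on padding (padding contributes 0 to the sum either way).
fn avg_window(values: &[f32], pad_cells: usize, count_include_pad: bool) -> f32 {
    let sum: f32 = values.iter().sum();
    let denom = if count_include_pad {
        values.len() + pad_cells // padded cells counted as zeros
    } else {
        values.len() // padded cells excluded from the average
    };
    sum / denom as f32
}

fn main() {
    // Window [1.0, 3.0] plus two padded cells:
    assert_eq!(avg_window(&[1.0, 3.0], 2, false), 2.0);
    assert_eq!(avg_window(&[1.0, 3.0], 2, true), 1.0);
}
```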
61 changes: 32 additions & 29 deletions src/operators/nn/functional/max_pool.cairo
@@ -30,7 +30,6 @@ fn max_pool<
+PartialEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
-    +Into<i32, MAG>,
+Rem<T>,
+Neg<T>,
+SubEq<T>,
@@ -44,7 +43,7 @@
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
match dilations {
Option::Some(dilations) => {
if (min(dilations) != max(dilations) || min(dilations) != 1) {
@@ -173,7 +172,6 @@ fn max_pool_implementation<
+PartialEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
-    +Into<i32, MAG>,
+Rem<T>,
+Neg<T>,
+SubEq<T>,
@@ -187,7 +185,7 @@
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
assert((*X).shape.len() >= 3, 'X must have at least 3 dim');
let n_dims = kernel_shape.len();

@@ -469,7 +467,7 @@ fn max_pool_1d<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>
strides: Span<usize>,
output_spatial_shape: Span<usize>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
let mut y_dims = ArrayTrait::new();
y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
y_dims.append_span(output_spatial_shape);
@@ -523,14 +521,14 @@ fn max_pool_1d<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>
};

Y_data.append(Yh);
-            I_data.append((c * x_step).into() + h_index);
+            I_data.append((c * x_step) + h_index.into());

ph += 1;
};
c += 1;
};
if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
}
return (
TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -559,7 +557,7 @@ fn max_pool_2d<
strides: Span<usize>,
output_spatial_shape: Span<usize>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
let mut y_dims = ArrayTrait::new();
y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
y_dims.append_span(output_spatial_shape);
@@ -651,9 +649,9 @@ fn max_pool_2d<
if Yh != NumberTrait::<T>::min_value() {
Y_data.append(Yh);
if storage_order == 0 {
-                            I_data.append((c * x_step).into() + h_index * W.into() + w_index);
+                            I_data.append((c * x_step) + h_index.into() * W + w_index.into());
} else {
-                            I_data.append((c * x_step).into() + h_index + w_index * H.into());
+                            I_data.append((c * x_step) + h_index.into() + w_index.into() * H);
}
}
pw += 1;
@@ -664,7 +662,7 @@
};

if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
}
return (
TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -693,7 +691,7 @@ fn max_pool_3d<
strides: Span<usize>,
output_spatial_shape: Span<usize>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
let mut y_dims = ArrayTrait::new();
y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2));
y_dims.append_span(output_spatial_shape);
@@ -813,18 +811,18 @@ fn max_pool_3d<
if storage_order == 0 {
I_data
.append(
-                                    (c * x_step).into()
-                                        + h_index * W.into() * D.into()
-                                        + w_index * D.into()
-                                        + d_index
+                                    (c * x_step)
+                                        + h_index.into() * W * D
+                                        + w_index.into() * D
+                                        + d_index.into()
);
} else {
I_data
.append(
-                                    (c * x_step).into()
-                                        + h_index
-                                        + w_index * H.into()
-                                        + d_index * H.into() * W.into()
+                                    (c * x_step)
+                                        + h_index.into()
+                                        + w_index.into() * H
+                                        + d_index.into() * H * W
);
}
pd += 1;
@@ -837,7 +835,7 @@
};

if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
}
return (
TensorTrait::new(y_dims.span(), Y_data.span()),
@@ -857,7 +855,7 @@
+PartialEq<T>,
+PrintTrait<T>,
+TryInto<T, usize>,
-    +Into<i32, MAG>,
+    +Into<usize, MAG>,
+Div<T>
>(
X: @Tensor<T>,
@@ -870,7 +868,7 @@
strides: Span<usize>,
output_spatial_shape: Span<usize>,
output_len: usize,
-) -> (Tensor<T>, Option<Tensor<i32>>) {
+) -> (Tensor<T>, Option<Tensor<usize>>) {
let nd = (*X).shape.len() - 2;

let mut y_dims = ArrayTrait::new();
@@ -938,8 +936,10 @@ fn max_pool_nd<
nstart.append(ns);
nend.append(ns + *ks_n.at(n) * *dilation_n.at(n));

-                let a: T = NumberTrait::new_unscaled(((*nend.at(n) - ns)).into(), false);
-                let b: T = NumberTrait::new_unscaled((*dilation_n.at(n)).into(), false);
+                let a: T = NumberTrait::new_unscaled(
+                    (*kernel_shape.at(n) * *dilations.at(n)).into(), false
+                );
+                let b: T = NumberTrait::new_unscaled((*dilations.at(n)).into(), false);
nstep.append(NumberTrait::ceil(a / b).try_into().unwrap());
n += 1;
};
@@ -1004,7 +1004,7 @@ fn max_pool_nd<
index += *n_index.at(n) * (*x_stride.at(2 + n)).into();
n += 1;
};
-                I_data.append((c * x_step).into() + index);
+                I_data.append((c * x_step) + index.into());
} else {
let mut index = 0;
let mut n = nd;
@@ -1015,16 +1015,19 @@
index += *n_index.at(n - 1) * (*i_stride_storage_order_1.at(nd - n)).into();
n -= 1;
};
-                I_data.append((c * x_step).into() + index);
+                I_data.append((c * x_step) + index.into());
}
p += 1;
};
c += 1;
};
if output_len == 1 {
-        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+        return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<usize>>::None);
}
-    return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::<Tensor<i32>>::None);
+    return (
+        TensorTrait::new(y_dims.span(), Y_data.span()),
+        Option::Some(TensorTrait::new(y_dims.span(), I_data.span()))
+    );
}


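The `I_data.append` rewrites above all preserve the same two index conventions and only move the arithmetic from i32 to usize: with `storage_order == 0` the recorded position is row-major within its channel, with `storage_order == 1` column-major. A small Rust check of the 2-D formulas, assuming `x_step` is the per-channel stride `H * W` (an assumption about the Cairo code, not taken from this diff):

```rust
// Illustrative check of the two flat-index conventions used when
// recording max-pool indices. c = channel, (h, w) = spatial position.
fn flat_index_2d(c: usize, h: usize, w: usize, height: usize, width: usize, storage_order: usize) -> usize {
    let x_step = height * width; // assumed per-channel stride
    if storage_order == 0 {
        c * x_step + h * width + w // row-major within the channel
    } else {
        c * x_step + h + w * height // column-major within the channel
    }
}

fn main() {
    // Element (c = 1, h = 2, w = 3) in a 4 x 5 spatial grid:
    assert_eq!(flat_index_2d(1, 2, 3, 4, 5, 0), 33); // 20 + 2*5 + 3
    assert_eq!(flat_index_2d(1, 2, 3, 4, 5, 1), 34); // 20 + 2 + 3*4
}
```

The 3-D forms in max_pool_3d extend the same pattern with a depth axis, and max_pool_nd generalizes it through the accumulated stride products.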
4 changes: 1 addition & 3 deletions src/operators/nn/implementations/nn_fp16x16.cairo
@@ -13,8 +13,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
use orion::operators::tensor::implementations::tensor_fp16x16wide::{
FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd
};
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
use orion::operators::nn::AUTO_PAD;

impl FP16x16NN of NNTrait<FP16x16> {
@@ -159,7 +157,7 @@ impl FP16x16NN of NNTrait<FP16x16> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<FP16x16>, Option<Tensor<i32>>) {
+) -> (Tensor<FP16x16>, Option<Tensor<usize>>) {
functional::max_pool::max_pool(
X,
auto_pad,
17 changes: 12 additions & 5 deletions src/operators/nn/implementations/nn_fp32x32.cairo
@@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x
use orion::operators::tensor::implementations::tensor_fp32x32::{
FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd
};
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
use orion::operators::nn::AUTO_PAD;

impl FP32x32NN of NNTrait<FP32x32> {
@@ -153,8 +151,17 @@ impl FP32x32NN of NNTrait<FP32x32> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<FP32x32>, Option<Tensor<i32>>) {
-    //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len)
-    panic(array!['not supported!'])
+) -> (Tensor<FP32x32>, Option<Tensor<usize>>) {
+    functional::max_pool::max_pool(
+        X,
+        auto_pad,
+        ceil_mode,
+        dilations,
+        kernel_shape,
+        pads,
+        storage_order,
+        strides,
+        output_len
+    )
}
}
17 changes: 12 additions & 5 deletions src/operators/nn/implementations/nn_fp64x64.cairo
@@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x
use orion::operators::tensor::implementations::tensor_fp64x64::{
FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd
};
-//use orion::numbers::I32IntoU64;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
use orion::operators::nn::AUTO_PAD;

impl FP64x64NN of NNTrait<FP64x64> {
@@ -153,8 +151,17 @@ impl FP64x64NN of NNTrait<FP64x64> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<FP64x64>, Option<Tensor<i32>>) {
-    //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len)
-    panic(array!['not supported!'])
+) -> (Tensor<FP64x64>, Option<Tensor<usize>>) {
+    functional::max_pool::max_pool(
+        X,
+        auto_pad,
+        ceil_mode,
+        dilations,
+        kernel_shape,
+        pads,
+        storage_order,
+        strides,
+        output_len
+    )
}
}
4 changes: 1 addition & 3 deletions src/operators/nn/implementations/nn_fp8x23.cairo
@@ -11,8 +11,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W
};
use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor};
-use orion::numbers::I32IntoU32;
-use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
use orion::operators::nn::AUTO_PAD;

impl FP8x23NN of NNTrait<FP8x23> {
@@ -155,7 +153,7 @@ impl FP8x23NN of NNTrait<FP8x23> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<FP8x23>, Option<Tensor<i32>>) {
+) -> (Tensor<FP8x23>, Option<Tensor<usize>>) {
functional::max_pool::max_pool(
X,
auto_pad,
2 changes: 1 addition & 1 deletion src/operators/nn/implementations/nn_i32.cairo
@@ -144,7 +144,7 @@ impl I32NN of NNTrait<i32> {
storage_order: Option<usize>,
strides: Option<Span<usize>>,
output_len: usize,
-) -> (Tensor<i32>, Option<Tensor<i32>>) {
+) -> (Tensor<i32>, Option<Tensor<usize>>) {
panic(array!['not supported!'])
}
}
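Across every NNTrait implementation above the change is mechanical but caller-visible: the second tuple element is now an unsigned index tensor. A stand-in Rust sketch of the caller's view; the `Tensor` type here is a hypothetical placeholder, not Orion's:

```rust
// Hypothetical placeholder type, for illustration only.
struct Tensor<T> { data: Vec<T> }

// Stub with the new return shape: indices are positions into the
// flattened input, non-negative by construction, hence usize.
fn max_pool_stub() -> (Tensor<f32>, Option<Tensor<usize>>) {
    (Tensor { data: vec![3.0] }, Some(Tensor { data: vec![2] }))
}

fn main() {
    let (y, indices) = max_pool_stub();
    assert_eq!(y.data, vec![3.0]);
    assert_eq!(indices.unwrap().data, vec![2usize]);
}
```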