From c97734bda65e523285cf440f64ba4d4cb61e0a14 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Mon, 25 Dec 2023 15:10:58 -0800 Subject: [PATCH 01/40] everything done except generating tests --- nodegen/node/reduce_log_sum_exp.py | 124 +++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 nodegen/node/reduce_log_sum_exp.py diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py new file mode 100644 index 000000000..32cf0761b --- /dev/null +++ b/nodegen/node/reduce_log_sum_exp.py @@ -0,0 +1,124 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + +class Reduce_log_sum_exp(RunAll): + @staticmethod + def reduce_log_sum_exp_fp8x23(): + def reduce_log_sum_exp_export_do_not_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = False + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_log_sum_exp_fp8x23_export_do_not_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, false)", name) + + def reduce_log_sum_exp_export_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_log_sum_exp_fp8x23_export_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, true)", name) + + def reduce_log_sum_exp_axis_0(): + shape = [3, 2, 2] + axes = np.array([0], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape).astype(np.int64) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(0, true)", name) + + reduce_log_sum_exp_export_do_not_keepdims() + reduce_log_sum_exp_export_keepdims() + reduce_log_sum_exp_axis_0() + + @staticmethod + def reduce_log_sum_exp_fp16x16(): + def reduce_log_sum_exp_export_do_not_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = False + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_log_sum_exp_fp16x16_export_do_not_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, false)", name) + + def reduce_log_sum_exp_export_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + 
x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_log_sum_exp_fp16x16_export_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, true)", name) + + def reduce_log_sum_exp_axis_0(): + shape = [3, 2, 2] + axes = np.array([0], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(0, true)", name) + + reduce_log_sum_exp_export_do_not_keepdims() + reduce_log_sum_exp_export_keepdims() + reduce_log_sum_exp_axis_0() + + + + + + + From 36e53602bcd46e7d99d34d8b6af0e89189dac158 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Mon, 25 Dec 2023 15:13:08 -0800 Subject: [PATCH 02/40] forgot to commit cairo files --- docs/framework/numbers/fixed-point/README.md | 1 - docs/framework/operators/tensor/README.md | 1 + .../tensor/tensor.reduce_log_sum_exp.md | 35 +++++++++++++++++ src/operators/tensor/core.cairo | 38 +++++++++++++++++++ .../tensor/implementations/tensor_bool.cairo | 4 ++ .../implementations/tensor_fp16x16.cairo | 4 ++ .../implementations/tensor_fp16x16wide.cairo | 4 ++ .../implementations/tensor_fp32x32.cairo | 4 ++ .../implementations/tensor_fp64x64.cairo | 4 ++ .../implementations/tensor_fp8x23.cairo | 4 ++ .../implementations/tensor_fp8x23wide.cairo | 4 ++ .../tensor/implementations/tensor_i32.cairo | 6 +++ .../tensor/implementations/tensor_i8.cairo | 4 ++ .../tensor/implementations/tensor_u32.cairo | 4 ++ .../tensor/math/reduce_log_sum_exp.cairo | 31 +++++++++++++++ 15 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md create mode 100644 src/operators/tensor/math/reduce_log_sum_exp.cairo diff --git a/docs/framework/numbers/fixed-point/README.md b/docs/framework/numbers/fixed-point/README.md index 7da277d55..f30122676 100644 --- a/docs/framework/numbers/fixed-point/README.md +++ b/docs/framework/numbers/fixed-point/README.md @@ -69,7 +69,6 @@ use orion::numbers::fixed_point::core::FixedTrait; | [`fp.sinh`](fp.sinh.md) | Returns the value of the hyperbolic sine of the fixed point number. | | [`fp.tanh`](fp.tanh.md) | Returns the value of the hyperbolic tangent of the fixed point number. | | [`fp.sign`](fp.sign.md) | Returns the element-wise indication of the sign of the input fixed point number. | -| [`fp.erf`](fp.erf.md) | The error function of the input fixed point number computed element-wise.| ### Arithmetic & Comparison operators diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 575546094..2a55f5196 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -124,6 +124,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.not`](tensor.not.md) | Computes the logical negation of all elements in the input tensor. 
| | [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the log sum of the input tensor's elements along the provided axes. | | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | +| [`tensor.reduced_log_sum_exp`](tensor.reduced\_log\_sum\_exp.md) | Computes the log sum of the exponentials of the input tensor's elements along the provided axes. | ## Arithmetic Operations diff --git a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md new file mode 100644 index 000000000..dbed98a59 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md @@ -0,0 +1,35 @@ +## tensor.reduce_log_sum_exp + +```rust + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; +``` + +Computes the log sum of the exponentials of the input tensor's elements along the provided axes. + +## Args +* 'self'(`@Tensor`) - The input tensor. +* 'axis'(`usize`) - The dimension to reduce. +* 'keepdims'(`bool`) - If true, retains reduced dimensions with length 1. + +## Panics + +* Panics if axis is not in the range of the input tensor's dimensions. + +## Returns + +Returns a new `Tensor` instance with the specified axis reduced to the log of the sum of the exponentials of its elements. + + +## Example + +fn reduce_log_sum_exp() -> Tensor { + +let tensor = TensorTrait::new( + shape: array![2, 2, 2].span(), + data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), +); + +We can call `reduce_log_sum_exp` function as follows. + +return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); +>>> \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 514cb1aab..3e59d48cb 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -123,6 +123,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -4902,6 +4903,43 @@ trait TensorTrait { /// ``` /// fn reduce_log_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// ## tensor.reduce_log_sum_exp + /// + /// ```rust + /// fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// ``` + /// + /// Computes the log sum of the exponentials of the input tensor's elements along the provided axes. + /// + /// ## Args + /// * 'self'(`@Tensor`) - The input tensor. + /// * 'axis'(`usize`) - The dimension to reduce. + /// * 'keepdims'(`bool`) - If true, retains reduced dimensions with length 1. + /// + /// ## Panics + /// + /// * Panics if axis is not in the range of the input tensor's dimensions. + /// + /// ## Returns + /// + /// Returns a new `Tensor` instance with the specified axis reduced to the log of the sum of the exponentials of its elements. + /// + /// + /// ## Example + /// + /// fn reduce_log_sum_exp() -> Tensor { + /// + /// let tensor = TensorTrait::new( + /// shape: array![2, 2, 2].span(), + /// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), + /// ); + /// + /// We can call `reduce_log_sum_exp` function as follows.
+ /// + /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + /// >>> + + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ## tensor.erf /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 207d36dcb..926506ac9 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -466,6 +466,10 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported']) + } + fn unique( self: @Tensor, axis: Option, sorted: Option ) -> (Tensor, Tensor, Tensor, Tensor) { diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 81ca299bb..81270f2e6 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -524,6 +524,10 @@ impl FP16x16Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 906dd5942..e5f56e40e 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -490,6 +490,10 @@ impl FP16x16WTensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index cb6c89ca3..30f4bd461 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -525,6 +525,10 @@ impl FP32x32Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 2b0c5af26..2296d12b1 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -526,6 +526,10 @@ impl FP64x64Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 36e438b1a..09210f993 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ 
b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -524,6 +524,10 @@ impl FP8x23Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index e6b3a2f3d..fe09fbe10 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -477,6 +477,10 @@ impl FP8x23WTensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { math::erf::erf(*self) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 052874e83..2e46a7235 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -81,6 +81,8 @@ impl I32Tensor of TensorTrait { math::reduce_sum::reduce_sum(self, axis, keepdims) } + + fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -521,6 +523,10 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 988d03137..618771a2a 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -519,6 +519,10 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + panic(array!['not supported']) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index c6004b19d..bf81dc75e 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -462,6 +462,10 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } + fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo new file mode 100644 index 000000000..6fbe7f199 --- /dev/null +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -0,0 +1,31 @@ +use core::option::OptionTrait; +use core::array::ArrayTrait; +use core::array::SpanTrait; +use core::debug::PrintTrait; + +use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use orion::numbers::signed_integer::integer_trait::IntegerTrait; +use
orion::numbers::fixed_point::core::FixedTrait; + +/// Cf: TensorTrait::reduce_log_sum_exp docstring +fn reduce_log_sum_exp< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TMul: Mul, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>( + self: @Tensor, axis: usize, keepdims: bool +) -> Tensor { + let tensor_exp = self.exp(); + let tensor_exp_sum = tensor_exp.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_exp_sum_log = tensor_exp_sum.log(); + + return tensor_exp_sum_log; +} + + From f0070230d4762d01b9ae738cd9b5eb08cc434fac Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Tue, 2 Jan 2024 00:03:46 -0800 Subject: [PATCH 03/40] Fixed typos in docs and generated test files --- docs/framework/operators/tensor/README.md | 2 +- nodegen/node/reduce_log_sum_exp.py | 5 ---- src/operators/tensor/core.cairo | 2 +- src/operators/tensor/math/example.cairo | 2 ++ .../tensor/math/reduce_log_sum_exp.cairo | 2 +- tests/nodes.cairo | 6 +++++ ...m_exp_fp16x16_export_do_not_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 19 ++++++++++++++ ..._log_sum_exp_fp16x16_export_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 20 ++++++++++++++ ...p16x16_export_negative_axes_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 18 +++++++++++++ ...um_exp_fp8x23_export_do_not_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 19 ++++++++++++++ ...e_log_sum_exp_fp8x23_export_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 20 ++++++++++++++ ...fp8x23_export_negative_axes_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 18 +++++++++++++ ...g_sum_fp16x16_export_do_not_keepdims.cairo | 12 ++++----- ...duce_log_sum_fp16x16_export_keepdims.cairo | 12 ++++----- ...p16x16_export_negative_axes_keepdims.cairo | 12 ++++----- ...og_sum_fp8x23_export_do_not_keepdims.cairo | 12 ++++----- ...educe_log_sum_fp8x23_export_keepdims.cairo | 12 ++++----- ...fp8x23_export_negative_axes_keepdims.cairo | 12 ++++----- 30 files changed, 437 insertions(+), 44 deletions(-) create mode 100644 src/operators/tensor/math/example.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo create mode 100644 
tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 2a55f5196..b64b303ed 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -124,7 +124,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.not`](tensor.not.md) | Computes the logical negation of all elements in the input tensor. | | [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the log sum of the input tensor's elements along the provided axes. | | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | -| [`tensor.reduced_log_sum_exp`](tensor.reduced\_log\_sum\_exp.md) | Computes the log sum of the exponentials of the input tensor's elements along the provided axes. | +| [`tensor.reduce_log_sum_exp`](tensor.reduce\_log\_sum\_exp.md) | Computes the log sum of the exponentials of the input tensor's elements along the provided axes. | ## Arithmetic Operations diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py index 32cf0761b..9f054657a 100644 --- a/nodegen/node/reduce_log_sum_exp.py +++ b/nodegen/node/reduce_log_sum_exp.py @@ -102,11 +102,6 @@ def reduce_log_sum_exp_axis_0(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) name = "reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims" make_test( diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 3e59d48cb..d1a4c4c43 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -123,7 +123,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// diff --git a/src/operators/tensor/math/example.cairo b/src/operators/tensor/math/example.cairo new file mode 100644 index 000000000..1564f7193 --- /dev/null +++ b/src/operators/tensor/math/example.cairo @@ -0,0 +1,2 @@ +use core::option::OptionTrait; + diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index 6fbe7f199..c1ba3ba82 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -1,7 +1,7 @@ use core::option::OptionTrait; use core::array::ArrayTrait; use core::array::SpanTrait; -use core::debug::PrintTrait; +use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index ab219683d..8099dd734 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -836,3 +836,9 @@ mod unique_u32_with_axis_zero_sorted; mod unique_u32_with_axis_zero_not_sorted; mod unique_u32_with_axis_one_sorted; mod unique_u32_with_axis_one_not_sorted; +mod 
reduce_log_sum_exp_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_exp_fp16x16_export_keepdims; +mod reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims; +mod reduce_log_sum_exp_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_exp_fp8x23_export_keepdims; +mod reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims; diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo new file mode 100644 index 000000000..5dfc3eb02 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp16x16_export_do_not_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(2, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo new file mode 100644 index 000000000..572168299 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo new file mode 100644 index 000000000..70a904155 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 151601, sign: false }); + data.append(FP16x16 { mag: 282673, sign: false }); + data.append(FP16x16 { mag: 413745, sign: false }); + data.append(FP16x16 { mag: 544817, sign: false }); + data.append(FP16x16 { mag: 675889, sign: false }); + data.append(FP16x16 { mag: 806961, sign: false }); + TensorTrait::new(shape.span(), data.span()) 
+} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo new file mode 100644 index 000000000..59dd7e634 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp16x16_export_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(2, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo new file mode 100644 index 000000000..572168299 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo new file mode 100644 index 000000000..ed2af0a03 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 151601, sign: false }); + data.append(FP16x16 { mag: 282673, sign: false }); + data.append(FP16x16 { mag: 413745, sign: false }); + data.append(FP16x16 { mag: 544817, sign: false }); + data.append(FP16x16 { mag: 675889, sign: false }); + data.append(FP16x16 { mag: 806961, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo new file mode 100644 index 000000000..461a44977 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo new file mode 100644 index 000000000..572168299 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo new file mode 100644 index 000000000..8916ec453 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 591035, sign: false }); + data.append(FP16x16 { mag: 656571, sign: false }); + data.append(FP16x16 { mag: 722107, sign: false }); + data.append(FP16x16 { mag: 787643, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo new file mode 100644 index 000000000..1e1fc12be --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::FP8x23Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp8x23_export_do_not_keepdims() { + let input_0 = 
input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(2, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo new file mode 100644 index 000000000..d8f5ac09d --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo new file mode 100644 index 000000000..6bc1c44c7 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 19405045, sign: false }); + data.append(FP8x23 { mag: 36182261, sign: false }); + data.append(FP8x23 { mag: 52959477, sign: false }); + data.append(FP8x23 { mag: 69736693, sign: false }); + data.append(FP8x23 { mag: 86513909, sign: false }); + data.append(FP8x23 { mag: 103291125, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo new file mode 100644 index 000000000..e7aacd66d --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::FP8x23Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp8x23_export_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(2, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo new file mode 100644 index 000000000..d8f5ac09d --- /dev/null 
+++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo new file mode 100644 index 000000000..c10b9ef29 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 19405045, sign: false }); + data.append(FP8x23 { mag: 36182261, sign: false }); + data.append(FP8x23 { mag: 52959477, sign: false }); + data.append(FP8x23 { mag: 69736693, sign: false }); + data.append(FP8x23 { mag: 86513909, sign: false }); + data.append(FP8x23 { mag: 103291125, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo new file mode 100644 index 000000000..cf4caa23a --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::FP8x23Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0) = input_0.reduce_log_sum_exp(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo new file mode 100644 index 000000000..d8f5ac09d --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; 
+ +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo new file mode 100644 index 000000000..bc0ebd740 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 75652487, sign: false }); + data.append(FP8x23 { mag: 84041095, sign: false }); + data.append(FP8x23 { mag: 92429703, sign: false }); + data.append(FP8x23 { mag: 100818311, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo index 108ef328f..ebfc90df1 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(2, false); + let y_0 = input_0.reduce_log_sum(2, false); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo index 5ee464e1c..ae2174319 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, 
SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(2, true); + let y_0 = input_0.reduce_log_sum(2, true); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo index 7f7fc7f98..24121d4a3 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(0, true); + let y_0 = input_0.reduce_log_sum(0, true); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo index 3f0adf3eb..8e63a2eeb 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(2, false); + let y_0 = input_0.reduce_log_sum(2, false); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo index 5662f1510..2d1d01c00 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(2, true); + let y_0 = input_0.reduce_log_sum(2, 
true); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo index ec295a396..1a04f7be2 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.reduce_log_sum(0, true); + let y_0 = input_0.reduce_log_sum(0, true); - assert_eq(y, z); + assert_eq(y_0, z_0); } From 9c859dae9b1b7014d842f2e6685ab068e58d3aa7 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Tue, 2 Jan 2024 02:39:33 -0800 Subject: [PATCH 04/40] Added clarity on operator notes --- .../tensor/tensor.reduce_log_sum_exp.md | 58 ++++++++++++++++--- src/operators/tensor/core.cairo | 58 ++++++++++++++++--- .../tensor/math/reduce_log_sum_exp.cairo | 2 +- ...m_exp_fp16x16_export_do_not_keepdims.cairo | 6 +- ..._log_sum_exp_fp16x16_export_keepdims.cairo | 6 +- ...p16x16_export_negative_axes_keepdims.cairo | 6 +- ...um_exp_fp8x23_export_do_not_keepdims.cairo | 6 +- ...e_log_sum_exp_fp8x23_export_keepdims.cairo | 6 +- ...fp8x23_export_negative_axes_keepdims.cairo | 6 +- ...g_sum_fp16x16_export_do_not_keepdims.cairo | 6 +- ...duce_log_sum_fp16x16_export_keepdims.cairo | 6 +- ...p16x16_export_negative_axes_keepdims.cairo | 6 +- ...og_sum_fp8x23_export_do_not_keepdims.cairo | 6 +- ...educe_log_sum_fp8x23_export_keepdims.cairo | 6 +- ...fp8x23_export_negative_axes_keepdims.cairo | 6 +- 15 files changed, 135 insertions(+), 55 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md index dbed98a59..b631371d5 100644 --- a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md +++ b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md @@ -20,16 +20,56 @@ Computes the log sum of the exponentials of the input tensor's elements along th Returns a new `Tensor` instance with the specified axis reduced by summing its elements. -## Example +## Example -fn reduce_log_sum_exp() -> Tensor { +```rust +use core::array::{ArrayTrait, SpanTrait}; + use orion::operators::tensor::{TensorTrait, Tensor}; + use orion::operators::tensor::FP8x23Tensor; + use orion::numbers::{FixedTrait, FP8x23}; -let tensor = TensorTrait::new( - shape: array![2, 2, 2].span(), - data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), -); + fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); -We can call `reduce_log_sum_exp` function as follows. 
+ let mut data = ArrayTrait::new(); + data.append(FixedTrait::new_unscaled(1, false)); + data.append(FixedTrait::new_unscaled(2, false)); + data.append(FixedTrait::new_unscaled(3, false)); + data.append(FixedTrait::new_unscaled(4, false)); + data.append(FixedTrait::new_unscaled(5, false)); + data.append(FixedTrait::new_unscaled(6, false)); + data.append(FixedTrait::new_unscaled(7, false)); + data.append(FixedTrait::new_unscaled(8, false)); + data.append(FixedTrait::new_unscaled(9, false)); + data.append(FixedTrait::new_unscaled(10, false)); + data.append(FixedTrait::new_unscaled(11, false)); + data.append(FixedTrait::new_unscaled(12, false)); -return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); ->>> \ No newline at end of file +>>> ( + [[[1, 2] + [3, 4]] + + [[5, 6]] + [7, 8]] + + [[9, 10] + [11, 12]]] + ) + + + let tensor = TensorTrait::new(shape.span(), data.span()) + + return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); +} + +>>> + ( + [[2.31, 4.31] + [6.31, 8.31] + [10.31, 12.31]] + ) + +``` \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index d1a4c4c43..7970b69e8 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -4925,19 +4925,59 @@ trait TensorTrait { /// Returns a new `Tensor` instance with the specified axis reduced by summing its elements. /// /// - /// ## Example + /// ## Example /// - /// fn reduce_log_sum_exp() -> Tensor { + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::FP8x23Tensor; + /// use orion::numbers::{FixedTrait, FP8x23}; /// - /// let tensor = TensorTrait::new( - /// shape: array![2, 2, 2].span(), - /// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), - /// ); - /// - /// We can call `reduce_log_sum_exp` function as follows. 
+ /// fn input_0() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(3); + /// shape.append(2); + /// shape.append(2); /// - /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + /// let mut data = ArrayTrait::new(); + /// data.append(FixedTrait::new_unscaled(1, false)); + /// data.append(FixedTrait::new_unscaled(2, false)); + /// data.append(FixedTrait::new_unscaled(3, false)); + /// data.append(FixedTrait::new_unscaled(4, false)); + /// data.append(FixedTrait::new_unscaled(5, false)); + /// data.append(FixedTrait::new_unscaled(6, false)); + /// data.append(FixedTrait::new_unscaled(7, false)); + /// data.append(FixedTrait::new_unscaled(8, false)); + /// data.append(FixedTrait::new_unscaled(9, false)); + /// data.append(FixedTrait::new_unscaled(10, false)); + /// data.append(FixedTrait::new_unscaled(11, false)); + /// data.append(FixedTrait::new_unscaled(12, false)); + /// + /// >>> ( + /// [[[1, 2] + /// [3, 4]] + /// + /// [[5, 6]] + /// [7, 8]] + /// + /// [[9, 10] + /// [11, 12]]] + /// ) + /// + /// + /// let tensor = TensorTrait::new(shape.span(), data.span()) + /// + /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + /// } + /// /// >>> + /// ( + /// [[2.31, 4.31] + /// [6.31, 8.31] + /// [10.31, 12.31]] + /// ) + /// + /// ``` fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ## tensor.erf diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index c1ba3ba82..d82874582 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -7,7 +7,7 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::signed_integer::integer_trait::IntegerTrait; use orion::numbers::fixed_point::core::FixedTrait; - + /// Cf: TensorTrait::reduce_log_sum_exp docstring fn reduce_log_sum_exp< T, diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo index 5dfc3eb02..135166f2c 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp16x16_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(2, false); + let y = input_0.reduce_log_sum_exp(2, false); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo index 59dd7e634..63d7a66dc 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp16x16_export_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(2, true); + let y = input_0.reduce_log_sum_exp(2, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo 
b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo index 461a44977..1daf05ad6 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(0, true); + let y = input_0.reduce_log_sum_exp(0, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo index 1e1fc12be..b71d97224 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp8x23_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(2, false); + let y = input_0.reduce_log_sum_exp(2, false); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo index e7aacd66d..68405ce58 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp8x23_export_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(2, true); + let y = input_0.reduce_log_sum_exp(2, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo index cf4caa23a..064ff9c74 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo @@ -12,9 +12,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let (y_0) = input_0.reduce_log_sum_exp(0, true); + let y = input_0.reduce_log_sum_exp(0, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo index ebfc90df1..74bf3636e 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_do_not_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(2, false); + let y = input_0.reduce_log_sum(2, false); - assert_eq(y_0, z_0); + 
assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo index ae2174319..19a7f1fac 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(2, true); + let y = input_0.reduce_log_sum(2, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo index 24121d4a3..f1764b328 100644 --- a/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp16x16_export_negative_axes_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp16x16_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(0, true); + let y = input_0.reduce_log_sum(0, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo index 8e63a2eeb..4b6fde3fe 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_do_not_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_do_not_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(2, false); + let y = input_0.reduce_log_sum(2, false); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo index 2d1d01c00..f6fb67955 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(2, true); + let y = input_0.reduce_log_sum(2, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo index 1a04f7be2..e4c4345b8 100644 --- a/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_fp8x23_export_negative_axes_keepdims.cairo @@ -12,9 +12,9 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_fp8x23_export_negative_axes_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_log_sum(0, true); + let y = input_0.reduce_log_sum(0, true); - assert_eq(y_0, z_0); + assert_eq(y, z); } From 
25718fd35e863698a4fb065a54669ae2799c08cb Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Tue, 2 Jan 2024 17:04:28 -0800 Subject: [PATCH 05/40] Fixed minor bugs --- src/operators/tensor/implementations/tensor_i32.cairo | 2 +- src/operators/tensor/math.cairo | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 2e46a7235..d5f4647ef 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -524,7 +524,7 @@ impl I32Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 8635b60bf..d841b4cb6 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -64,3 +64,4 @@ mod is_nan; mod is_inf; mod reduce_log_sum; mod erf; +mod reduce_log_sum_exp; From 38ecd86025b97c35079d95ff38992535b5919103 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 7 Jan 2024 20:06:46 -0500 Subject: [PATCH 06/40] fix doc --- docs/framework/numbers/fixed-point/README.md | 1 + .../operators/machine-learning/linear-classifier/README.md | 3 ++- .../linear-classifier/linear_classifier.predict.md | 4 ++-- src/numbers/fixed_point/core.cairo | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/framework/numbers/fixed-point/README.md b/docs/framework/numbers/fixed-point/README.md index f30122676..4ecc2246f 100644 --- a/docs/framework/numbers/fixed-point/README.md +++ b/docs/framework/numbers/fixed-point/README.md @@ -69,6 +69,7 @@ use orion::numbers::fixed_point::core::FixedTrait; | [`fp.sinh`](fp.sinh.md) | Returns the value of the hyperbolic sine of the fixed point number. | | [`fp.tanh`](fp.tanh.md) | Returns the value of the hyperbolic tangent of the fixed point number. | | [`fp.sign`](fp.sign.md) | Returns the element-wise indication of the sign of the input fixed point number. | +| [`fp.erf`](fp.erf.md) | Returns the error function of the input fixed point number computed element-wise. | ### Arithmetic & Comparison operators diff --git a/docs/framework/operators/machine-learning/linear-classifier/README.md b/docs/framework/operators/machine-learning/linear-classifier/README.md index 7b68132c4..7323f8b7f 100644 --- a/docs/framework/operators/machine-learning/linear-classifier/README.md +++ b/docs/framework/operators/machine-learning/linear-classifier/README.md @@ -19,4 +19,5 @@ Orion supports currently only fixed point data types for `LinearClassificationTr | function | description | | --- | --- | -| [`linear_classifier.predict`](linear_classifier.predict.md) | Performs the linear classification evaluation. | +| [`linear_classifier.predict`](linear_classifier.predict.md) | Performs the linear classification. 
| + diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md index 3b9537b1c..aec154f68 100644 --- a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md +++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md @@ -4,7 +4,7 @@ fn predict(ref self: LinearClassifier, X: Tensor) -> Tensor; ``` -Linear Regressor. Performs the linear classification. +Linear Classifier. Performs the linear classification. ## Args @@ -13,7 +13,7 @@ Linear Regressor. Performs the linear classification. ## Returns -* Tensor containing the generalized linear regression evaluation of the input X. +* Tensor containing the linear classification evaluation of the input X. ## Type Constraints diff --git a/src/numbers/fixed_point/core.cairo b/src/numbers/fixed_point/core.cairo index 0ef1f8c6f..e35d8abdb 100644 --- a/src/numbers/fixed_point/core.cairo +++ b/src/numbers/fixed_point/core.cairo @@ -33,6 +33,7 @@ /// sinh - Returns the value of the hyperbolic sine of the fixed point number. /// tanh - Returns the value of the hyperbolic tangent of the fixed point number. /// sign - Returns the element-wise indication of the sign of the input fixed point number. +/// erf - Returns the error function of the input fixed point number computed element-wise. /// trait FixedTrait { /// # FixedTrait::new From 22339ca489572be304f7891ebe976c0ad402e688 Mon Sep 17 00:00:00 2001 From: raphaelDkhn <113879115+raphaelDkhn@users.noreply.github.com> Date: Sun, 7 Jan 2024 20:07:59 -0500 Subject: [PATCH 07/40] Delete src/operators/tensor/math/example.cairo --- src/operators/tensor/math/example.cairo | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 src/operators/tensor/math/example.cairo diff --git a/src/operators/tensor/math/example.cairo b/src/operators/tensor/math/example.cairo deleted file mode 100644 index 1564f7193..000000000 --- a/src/operators/tensor/math/example.cairo +++ /dev/null @@ -1,2 +0,0 @@ -use core::option::OptionTrait; - From fbfe2313d0f5d3fdeeafe39e0a0e74b9e88a55c0 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 7 Jan 2024 20:11:24 -0500 Subject: [PATCH 08/40] add operator to summary and compatibility --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index fa9998f2e..8e2dacca3 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -119,6 +119,7 @@ * [tensor.not](framework/operators/tensor/tensor.not.md) * [tensor.erf](framework/operators/tensor/tensor.erf.md) * [tensor.reduce\_log\_sum](framework/operators/tensor/tensor.reduce\_log\_sum.md) + * [tensor.reduce\_log\_sum\_exp](framework/operators/tensor/tensor.reduce\_log\_sum\_exp.md) * [tensor.unique](framework/operators/tensor/tensor.unique.md) * [tensor.compress](framework/operators/tensor/tensor.compress.md) * [Neural Network](framework/operators/neural-network/README.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index e0153274a..f02afc2e8 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -37,7 +37,7 @@ You can see below the list of current supported ONNX Operators: | [ThresholdedRelu](operators/neural-network/nn.thresholded\_relu.md) | :white\_check\_mark: | | [Sigmoid](operators/neural-network/nn.sigmoid.md) | :white\_check\_mark: | | 
[Softmax](operators/neural-network/nn.softmax.md) | :white\_check\_mark: | -| [Softmax_zero](operators/neural-network/nn.softmax_zero.md) | :white\_check\_mark: | +| [Softmax_zero](operators/neural-network/nn.softmax_zero.md) | :white\_check\_mark: | | [LogSoftmax](operators/neural-network/nn.logsoftmax.md) | :white\_check\_mark: | | [Softsign](operators/neural-network/nn.softsign.md) | :white\_check\_mark: | | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | @@ -102,9 +102,10 @@ You can see below the list of current supported ONNX Operators: | [IsNaN](operators/tensor/tensor.is\_nan.md) | :white\_check\_mark: | | [IsInf](operators/tensor/tensor.is\_inf.md) | :white\_check\_mark: | | [Not](operators/tensor/tensor.not.md) | :white\_check\_mark: | -| [GatherND](operators/tensor/tensor.gather/_nd.md) | :white\_check\_mark: | -| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: | -| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | -| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | +| [GatherND](operators/tensor/tensor.gather/_nd.md) | :white\_check\_mark: | +| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: | +| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | +| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | +| [ReduceLogSumExp](operators/tensor/tensor.reduce\_log\_sum\_exp.md) | :white\_check\_mark: | Current Operators support: **97/156 (62%)** From 8f7bd712ec3c6dbbc731f872a92525e609fb59f9 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 7 Jan 2024 20:15:19 -0500 Subject: [PATCH 09/40] Update tensor_complex64.cairo --- src/operators/tensor/implementations/tensor_complex64.cairo | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 53feb8980..eb27c7963 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -475,6 +475,10 @@ impl Complex64Tensor of TensorTrait { fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { math::compress::compress(self, condition, axis) } + + fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + } } /// Implements addition for `Tensor` using the `Add` trait. 
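The patch below reworks `reduce_log_sum_exp` around the wide fixed-point types. For reference, the operator is just `log(sum(exp(x), axis))`, and the fixed-point test vectors generated earlier in the series can be checked against NumPy. A minimal sketch, not part of the patches, assuming FP8x23 stores `value * 2**23` and FP16x16 stores `value * 2**16` in its `mag` field:

```python
import numpy as np

# Reference computation behind the generated reduce_log_sum_exp test vectors:
# x is 1..12 reshaped to (3, 2, 2), reduced over axis 2 without keepdims.
x = np.arange(1, 13, dtype=np.float64).reshape(3, 2, 2)
y = np.log(np.sum(np.exp(x), axis=2))

print(y[0, 0])                 # 2.3132616... = 1 + log(1 + e)
print(round(y[0, 0] * 2**23))  # ~19405045, cf. FP8x23 { mag: 19405045 }
print(round(y[0, 0] * 2**16))  # ~151602, cf. FP16x16 { mag: 151601 }
```

The FP16x16 magnitude differs in the last digit from the float64 value because Cairo's fixed-point `exp`/`log` round slightly differently.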
From 8ff816da0b8f4de94cad9d2a840ff884f0fa5ee2 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Wed, 10 Jan 2024 02:25:02 -0800 Subject: [PATCH 10/40] reimplemented reduce_log_sum_exp operator --- .../implementations/tensor_fp16x16.cairo | 9 +++- .../implementations/tensor_fp8x23.cairo | 8 ++- .../tensor/math/reduce_log_sum_exp.cairo | 49 ++++++++++++++----- 3 files changed, 53 insertions(+), 13 deletions(-) diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 3eb069d64..5de5ac3a3 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -15,6 +15,13 @@ use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ + FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W +}; +use orion::operators::tensor::implementations::tensor_fp16x16wide::{ + FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd +}; + impl FP16x16Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -495,7 +502,7 @@ impl FP16x16Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 54b364648..7eb1dbecb 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -3,6 +3,12 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; +use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ + FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W +}; + +use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor}; + use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -494,7 +500,7 @@ impl FP8x23Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index d82874582..f9ef7c0eb 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -1,31 +1,58 @@ use core::option::OptionTrait; use core::array::ArrayTrait; use core::array::SpanTrait; -use core::debug::PrintTrait; +use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::signed_integer::integer_trait::IntegerTrait; use orion::numbers::fixed_point::core::FixedTrait; - +use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; + /// Cf: TensorTrait::reduce_log_sum_exp docstring -fn reduce_log_sum_exp< +fn reduce_log_sum_exp_wide< T, - MAG, - impl TTensor: TensorTrait, - impl 
TNumber: NumberTrait, - impl TMul: Mul, - impl TAddEq: AddEq, + TMAG, + W, + WMAG, + impl TIntoW: Into, + impl WTryIntoT: TryInto, + impl WCopy: Copy, + impl WDrop: Drop, impl TCopy: Copy, impl TDrop: Drop, + impl TDiv: Div, + impl TTensor: TensorTrait, + impl WTensor: TensorTrait, + impl TFixed: FixedTrait, + impl WFixed: FixedTrait, >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_exp = self.exp(); - let tensor_exp_sum = tensor_exp.reduce_sum(axis: axis, keepdims: keepdims); + + let tensor_exp: Tensor = exp_upcast(*self); + let tensor_exp_sum = tensor_exp.reduce_sum(axis, keepdims); let tensor_exp_sum_log = tensor_exp_sum.log(); - return tensor_exp_sum_log; + div_downcast(@tensor_exp, @tensor_exp_sum_log) } + fn reduce_log_sum_exp< + T, + MAG, + impl Tensor: TensorTrait, + impl TNumber: NumberTrait, + impl TMul: Mul, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, + >( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + + let tensor_exp = self.exp(); + let tensor_exp_sum = tensor_exp. reduce_sum(axis: axis, keepdims: keepdims) ; + let tensor_exp_sum_log = tensor_exp_sum.log(); + return tensor_exp_sum_log; +} From 07820714aa13bd02c61466189978a9c9cf923a95 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Sun, 14 Jan 2024 21:11:22 -0800 Subject: [PATCH 11/40] Reimplemented reduceLogSumExp #2 --- nodegen/node/reduce_log_sum_exp.py | 34 +++++++++---------- src/operators/tensor/helpers.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 10 ++---- .../implementations/tensor_fp16x16wide.cairo | 9 ++++- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 5 ++- src/operators/tensor/math/exp.cairo | 4 +-- .../tensor/math/reduce_log_sum_exp.cairo | 16 ++++----- ...m_exp_fp16x16_export_do_not_keepdims.cairo | 4 +-- .../output_0.cairo | 12 +++---- ..._log_sum_exp_fp16x16_export_keepdims.cairo | 6 ++-- .../output_0.cairo | 14 ++++---- ...p16x16_export_negative_axes_keepdims.cairo | 4 +-- .../output_0.cairo | 10 +++--- ...um_exp_fp8x23_export_do_not_keepdims.cairo | 4 +-- .../output_0.cairo | 12 +++---- ...e_log_sum_exp_fp8x23_export_keepdims.cairo | 4 +-- .../output_0.cairo | 12 +++---- ...fp8x23_export_negative_axes_keepdims.cairo | 4 +-- .../output_0.cairo | 8 ++--- 20 files changed, 89 insertions(+), 87 deletions(-) diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py index 9f054657a..5cfcee873 100644 --- a/nodegen/node/reduce_log_sum_exp.py +++ b/nodegen/node/reduce_log_sum_exp.py @@ -7,10 +7,10 @@ class Reduce_log_sum_exp(RunAll): def reduce_log_sum_exp_fp8x23(): def reduce_log_sum_exp_export_do_not_keepdims(): shape = [3, 2, 2] - axes = np.array([2], dtype=np.int64) + axes = np.array([2], dtype=np.uint32) keepdims = False - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)) + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) @@ -23,10 +23,10 @@ def reduce_log_sum_exp_export_do_not_keepdims(): def reduce_log_sum_exp_export_keepdims(): shape = [3, 2, 2] - axes = np.array([2], dtype=np.int64) + axes = np.array([2], dtype=np.uint32) keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64) - y = 
np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) @@ -39,10 +39,10 @@ def reduce_log_sum_exp_export_keepdims(): def reduce_log_sum_exp_axis_0(): shape = [3, 2, 2] - axes = np.array([0], dtype=np.int64) + axes = np.array([0], dtype=np.uint32) keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1), shape).astype(np.int64) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) @@ -61,10 +61,10 @@ def reduce_log_sum_exp_axis_0(): def reduce_log_sum_exp_fp16x16(): def reduce_log_sum_exp_export_do_not_keepdims(): shape = [3, 2, 2] - axes = np.array([2], dtype=np.int64) + axes = np.array([2], dtype=np.uint32) keepdims = False - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)) + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) @@ -77,10 +77,10 @@ def reduce_log_sum_exp_export_do_not_keepdims(): def reduce_log_sum_exp_export_keepdims(): shape = [3, 2, 2] - axes = np.array([2], dtype=np.int64) + axes = np.array([2], dtype=np.uint32) keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) @@ -93,10 +93,10 @@ def reduce_log_sum_exp_export_keepdims(): def reduce_log_sum_exp_axis_0(): shape = [3, 2, 2] - axes = np.array([0], dtype=np.int64) + axes = np.array([0], dtype=np.uint32) keepdims = True x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 894dfc8d4..6c90de1aa 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -52,7 +52,7 @@ fn check_shape(shape: Span, data: Span) { /// * Panics if the shapes are not compatible for broadcasting. 
fn check_compatibility(mut shape_1: Span, mut shape_2: Span) { assert(shape_1.len() == shape_2.len(), 'tensors shape must match'); - + loop { match shape_1.pop_front() { Option::Some(shape_1_val) => { diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 5de5ac3a3..9a8f0735b 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -15,12 +15,8 @@ use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; -use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ - FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W -}; -use orion::operators::tensor::implementations::tensor_fp16x16wide::{ - FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd -}; +use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W; + impl FP16x16Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -502,7 +498,7 @@ impl FP16x16Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index d906ab55c..1baabb74e 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -15,6 +15,13 @@ use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ + FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16IntoFP16x16W +}; + +use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; + + impl FP16x16WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -461,7 +468,7 @@ impl FP16x16WTensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 7eb1dbecb..38d6622f9 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -500,7 +500,7 @@ impl FP8x23Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 27f0dcf46..af722d118 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -15,6 +15,9 @@ use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use 
orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; + + impl FP8x23WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -447,7 +450,7 @@ impl FP8x23WTensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/math/exp.cairo b/src/operators/tensor/math/exp.cairo index 889082d56..7c00ae5c6 100644 --- a/src/operators/tensor/math/exp.cairo +++ b/src/operators/tensor/math/exp.cairo @@ -34,10 +34,10 @@ fn exp< /// Cf: TensorTrait::exp docstring fn exp_upcast< T, - MAG, + TMAG, W, WMAG, - impl TFixedTrait: FixedTrait, + impl TFixedTrait: FixedTrait, impl TTensor: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index f9ef7c0eb..1dae0db41 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -25,16 +25,14 @@ fn reduce_log_sum_exp_wide< impl TTensor: TensorTrait, impl WTensor: TensorTrait, impl TFixed: FixedTrait, - impl WFixed: FixedTrait, + impl WFixed: FixedTrait >( self: @Tensor, axis: usize, keepdims: bool -) -> Tensor { +) -> Tensor { let tensor_exp: Tensor = exp_upcast(*self); - let tensor_exp_sum = tensor_exp.reduce_sum(axis, keepdims); - let tensor_exp_sum_log = tensor_exp_sum.log(); - - div_downcast(@tensor_exp, @tensor_exp_sum_log) + let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims); + return tensor_exp_log_sum; } fn reduce_log_sum_exp< @@ -51,8 +49,6 @@ fn reduce_log_sum_exp_wide< ) -> Tensor { let tensor_exp = self.exp(); - let tensor_exp_sum = tensor_exp. 
reduce_sum(axis: axis, keepdims: keepdims) ; - let tensor_exp_sum_log = tensor_exp_sum.log(); - - return tensor_exp_sum_log; + let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis: axis, keepdims: keepdims); + return tensor_exp_log_sum; } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo index 135166f2c..a3a8e8b52 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo @@ -3,10 +3,10 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo index 70a904155..c7ca08504 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo @@ -9,11 +9,11 @@ fn output_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 151601, sign: false }); - data.append(FP16x16 { mag: 282673, sign: false }); - data.append(FP16x16 { mag: 413745, sign: false }); - data.append(FP16x16 { mag: 544817, sign: false }); - data.append(FP16x16 { mag: 675889, sign: false }); - data.append(FP16x16 { mag: 806961, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo index 63d7a66dc..c37492eb2 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo @@ -3,16 +3,16 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp16x16_export_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); let y = input_0.reduce_log_sum_exp(2, true); diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo index ed2af0a03..e04f61ba9 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo @@ -3,18 +3,18 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16Tensor; use orion::numbers::{FixedTrait, FP16x16}; 
-fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 151601, sign: false }); - data.append(FP16x16 { mag: 282673, sign: false }); - data.append(FP16x16 { mag: 413745, sign: false }); - data.append(FP16x16 { mag: 544817, sign: false }); - data.append(FP16x16 { mag: 675889, sign: false }); - data.append(FP16x16 { mag: 806961, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo index 1daf05ad6..f7a80713d 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo @@ -3,10 +3,10 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo index 8916ec453..bcaeea768 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo @@ -3,16 +3,16 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16Tensor; use orion::numbers::{FixedTrait, FP16x16}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 591035, sign: false }); - data.append(FP16x16 { mag: 656571, sign: false }); - data.append(FP16x16 { mag: 722107, sign: false }); - data.append(FP16x16 { mag: 787643, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo index b71d97224..3a48bad4f 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo @@ -3,10 +3,10 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23Tensor; #[test] 
#[available_gas(2000000000)] diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo index 6bc1c44c7..72dab7e50 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo @@ -9,11 +9,11 @@ fn output_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 19405045, sign: false }); - data.append(FP8x23 { mag: 36182261, sign: false }); - data.append(FP8x23 { mag: 52959477, sign: false }); - data.append(FP8x23 { mag: 69736693, sign: false }); - data.append(FP8x23 { mag: 86513909, sign: false }); - data.append(FP8x23 { mag: 103291125, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo index 68405ce58..488899408 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo @@ -3,10 +3,10 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23Tensor; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo index c10b9ef29..bcb32e2c1 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo @@ -10,11 +10,11 @@ fn output_0() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 19405045, sign: false }); - data.append(FP8x23 { mag: 36182261, sign: false }); - data.append(FP8x23 { mag: 52959477, sign: false }); - data.append(FP8x23 { mag: 69736693, sign: false }); - data.append(FP8x23 { mag: 86513909, sign: false }); - data.append(FP8x23 { mag: 103291125, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo index 064ff9c74..90a88770c 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo @@ -3,10 +3,10 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; -use 
orion::operators::tensor::FP8x23Tensor; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23Tensor; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo index bc0ebd740..f2137c03c 100644 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo @@ -10,9 +10,9 @@ fn output_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 75652487, sign: false }); - data.append(FP8x23 { mag: 84041095, sign: false }); - data.append(FP8x23 { mag: 92429703, sign: false }); - data.append(FP8x23 { mag: 100818311, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); TensorTrait::new(shape.span(), data.span()) } From b45c7ec00a78858e1a6fc7a313fe15992a7a9fd5 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Mon, 15 Jan 2024 18:41:44 -0800 Subject: [PATCH 12/40] Reimplemented try #3 still with error --- tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo | 2 +- .../reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo | 2 +- .../output_0.cairo | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo index c37492eb2..2f3ff6936 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo @@ -12,7 +12,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; #[available_gas(2000000000)] fn test_reduce_log_sum_exp_fp16x16_export_keepdims() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); let y = input_0.reduce_log_sum_exp(2, true); diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo index e04f61ba9..d3a56d7df 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16Tensor; use orion::numbers::{FixedTrait, FP16x16}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo index bcaeea768..6dd797b3e 100644 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo +++ b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16Tensor; use orion::numbers::{FixedTrait, FP16x16}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(2); 
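For context on patches 10 through 12: routing `exp` through `FP16x16W`/`FP8x23W` before reducing is about overflow of the intermediate sum, not precision of the final log. A sketch of the overflow argument, assuming FP16x16 has a 32-bit magnitude with 16 fractional bits (maximum representable value 2**31 / 2**16 = 32768):

```python
import math

# exp(x) leaves FP16x16 range once x exceeds log(32768) ~= 10.4,
# and the test input above runs up to 12.
fp16x16_max = 2.0 ** 31 / 2.0 ** 16
print(math.log(fp16x16_max))  # ~10.397
print(math.exp(12))           # ~162754.8, not representable in FP16x16

# Upcasting to a 64-bit-magnitude wide type keeps exp(x) and the running sum
# representable; the final log brings the result back into FP16x16 range.
```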
From 5fe6b720b8ba8c3ac865a00556baf07159a797d5 Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Thu, 18 Jan 2024 01:40:43 -0800 Subject: [PATCH 13/40] Temporary solution submitted --- nodegen/node/reduce_log_sum_exp.py | 233 +- nodegen/node/reduce_prod.py | 287 --- .../tree_ensemble_classifier.cairo | 68 +- src/operators/sequence/functional.cairo | 2 +- .../implementations/sequence_bool.cairo | 1 - .../implementations/sequence_fp8x23wide.cairo | 1 - src/operators/tensor/core.cairo | 2 +- src/operators/tensor/helpers.cairo | 2 +- .../tensor/implementations/tensor_bool.cairo | 4 +- .../implementations/tensor_complex64.cairo | 8 +- .../implementations/tensor_fp16x16.cairo | 10 +- .../implementations/tensor_fp16x16wide.cairo | 14 +- .../implementations/tensor_fp32x32.cairo | 8 +- .../implementations/tensor_fp64x64.cairo | 8 +- .../implementations/tensor_fp8x23.cairo | 10 +- .../implementations/tensor_fp8x23wide.cairo | 10 +- .../tensor/implementations/tensor_i32.cairo | 7 +- .../tensor/implementations/tensor_i8.cairo | 4 +- .../tensor/implementations/tensor_u32.cairo | 6 +- src/operators/tensor/math.cairo | 2 +- src/operators/tensor/math/compress.cairo | 69 +- .../tensor/math/reduce_log_sum_exp.cairo | 23 +- tests/lib.cairo | 1 - tests/ml/tree_ensemble_classifier.cairo | 2107 +++++++++++++++-- tests/nodes.cairo | 7 +- tests/nodes/compress_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis3.cairo | 2 +- tests/nodes/compress_fp16x16_3d_default.cairo | 2 +- tests/nodes/compress_fp16x16_3d_noaxis.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/compress_fp8x23_3d_default.cairo | 2 +- tests/nodes/compress_i32_3d_axis1.cairo | 2 +- tests/nodes/compress_i32_3d_axis2.cairo | 2 +- tests/nodes/compress_i32_3d_default.cairo | 2 +- tests/nodes/compress_i8_3d_axis1.cairo | 2 +- tests/nodes/compress_i8_3d_axis2.cairo | 2 +- tests/nodes/compress_i8_3d_default.cairo | 2 +- tests/nodes/compress_u32_3d_axis1.cairo | 2 +- tests/nodes/compress_u32_3d_axis2.cairo | 2 +- tests/nodes/compress_u32_3d_axis2_2.cairo | 2 +- tests/nodes/compress_u32_3d_axis3.cairo | 2 +- tests/nodes/compress_u32_3d_default.cairo | 2 +- ...m_exp_fp16x16_export_do_not_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 19 - ..._log_sum_exp_fp16x16_export_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 20 - ...p16x16_export_negative_axes_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 18 - ...um_exp_fp8x23_export_do_not_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 19 - ...e_log_sum_exp_fp8x23_export_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 20 - ...fp8x23_export_negative_axes_keepdims.cairo | 20 - .../input_0.cairo | 26 - .../output_0.cairo | 18 - 62 files changed, 2201 insertions(+), 1121 deletions(-) delete mode 100644 nodegen/node/reduce_prod.py delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo delete mode 100644 
tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py index 5cfcee873..6f405f992 100644 --- a/nodegen/node/reduce_log_sum_exp.py +++ b/nodegen/node/reduce_log_sum_exp.py @@ -1,115 +1,140 @@ -import numpy as np -from nodegen.node import RunAll -from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl - -class Reduce_log_sum_exp(RunAll): - @staticmethod - def reduce_log_sum_exp_fp8x23(): - def reduce_log_sum_exp_export_do_not_keepdims(): - shape = [3, 2, 2] - axes = np.array([2], dtype=np.uint32) - keepdims = False - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) +# import numpy as np +# from nodegen.node import RunAll +# from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp + +# class Reduce_log_sum_exp(RunAll): +# @staticmethod +# def reduce_log_sum_exp_fp64x64(): +# shape = [3, 2, 2] +# axes = np.array([2], dtype=np.uint32) +# keepdims = False + +# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) + +# x = Tensor(Dtype.FP64x64, x.shape, to_fp( +# x.flatten(), FixedImpl.FP64x64)) +# y = Tensor(Dtype.FP64x64, y.shape, to_fp( +# y.flatten(), FixedImpl.FP64x64)) + +# name = "reduce_log_sum_exp_fp64x64_export_do_not_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(2, false)", name) + + + + + + + +# def reduce_log_sum_exp_fp16x16(): +# def reduce_log_sum_exp_export_do_not_keepdims(): +# shape = [3, 2, 2] +# axes = np.array([2], dtype=np.uint32) +# keepdims = False +# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) + +# x = Tensor(Dtype.FP16x16, x.shape, to_fp( +# x.flatten(), FixedImpl.FP16x16)) +# y = Tensor(Dtype.FP8x23, y.shape, to_fp( +# y.flatten(), FixedImpl.FP16x16)) - name = "reduce_log_sum_exp_fp8x23_export_do_not_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(2, false)", name) +# name = "reduce_log_sum_exp_fp8x23_export_do_not_keepdims" +# make_test( +# [x], y, 
"input_0.reduce_log_sum_exp(2, false)", name) - def reduce_log_sum_exp_export_keepdims(): - shape = [3, 2, 2] - axes = np.array([2], dtype=np.uint32) - keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) +# def reduce_log_sum_exp_export_keepdims(): +# shape = [3, 2, 2] +# axes = np.array([2], dtype=np.uint32) +# keepdims = True +# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) + +# x = Tensor(Dtype.FP8x23, x.shape, to_fp( +# x.flatten(), FixedImpl.FP8x23)) +# y = Tensor(Dtype.FP8x23, y.shape, to_fp( +# y.flatten(), FixedImpl.FP8x23)) - name = "reduce_log_sum_exp_fp8x23_export_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(2, true)", name) - - def reduce_log_sum_exp_axis_0(): - shape = [3, 2, 2] - axes = np.array([0], dtype=np.uint32) - keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(0, true)", name) - - reduce_log_sum_exp_export_do_not_keepdims() - reduce_log_sum_exp_export_keepdims() - reduce_log_sum_exp_axis_0() - - @staticmethod - def reduce_log_sum_exp_fp16x16(): - def reduce_log_sum_exp_export_do_not_keepdims(): - shape = [3, 2, 2] - axes = np.array([2], dtype=np.uint32) - keepdims = False - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) +# name = "reduce_log_sum_exp_fp8x23_export_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(2, true)", name) + +# def reduce_log_sum_exp_axis_0(): +# shape = [3, 2, 2] +# axes = np.array([0], dtype=np.uint32) +# keepdims = True +# x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) + +# x = Tensor(Dtype.FP8x23, x.shape, to_fp( +# x.flatten(), FixedImpl.FP8x23)) +# y = Tensor(Dtype.FP8x23, y.shape, to_fp( +# y.flatten(), FixedImpl.FP8x23)) + +# name = "reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(0, true)", name) + +# reduce_log_sum_exp_export_do_not_keepdims() +# reduce_log_sum_exp_export_keepdims() +# reduce_log_sum_exp_axis_0() + +# @staticmethod +# def reduce_log_sum_exp_fp16x16(): +# def reduce_log_sum_exp_export_do_not_keepdims(): +# shape = [3, 2, 2] +# axes = np.array([2], dtype=np.uint32) +# keepdims = False +# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) + +# x = Tensor(Dtype.FP16x16, x.shape, to_fp( +# x.flatten(), FixedImpl.FP16x16)) +# y = Tensor(Dtype.FP16x16, y.shape, to_fp( +# y.flatten(), FixedImpl.FP16x16)) - name = 
"reduce_log_sum_exp_fp16x16_export_do_not_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(2, false)", name) +# name = "reduce_log_sum_exp_fp16x16_export_do_not_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(2, false)", name) - def reduce_log_sum_exp_export_keepdims(): - shape = [3, 2, 2] - axes = np.array([2], dtype=np.uint32) - keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) +# def reduce_log_sum_exp_export_keepdims(): +# shape = [3, 2, 2] +# axes = np.array([2], dtype=np.uint32) +# keepdims = True +# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) + +# x = Tensor(Dtype.FP16x16, x.shape, to_fp( +# x.flatten(), FixedImpl.FP16x16)) +# y = Tensor(Dtype.FP16x16, y.shape, to_fp( +# y.flatten(), FixedImpl.FP16x16)) - name = "reduce_log_sum_exp_fp16x16_export_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(2, true)", name) +# name = "reduce_log_sum_exp_fp16x16_export_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(2, true)", name) - def reduce_log_sum_exp_axis_0(): - shape = [3, 2, 2] - axes = np.array([0], dtype=np.uint32) - keepdims = True - x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) - y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims" - make_test( - [x], y, "input_0.reduce_log_sum_exp(0, true)", name) +# def reduce_log_sum_exp_axis_0(): +# shape = [3, 2, 2] +# axes = np.array([0], dtype=np.uint32) +# keepdims = True +# x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) +# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) + +# x = Tensor(Dtype.FP16x16, x.shape, to_fp( +# x.flatten(), FixedImpl.FP16x16)) +# y = Tensor(Dtype.FP16x16, y.shape, to_fp( +# y.flatten(), FixedImpl.FP16x16)) + +# name = "reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims" +# make_test( +# [x], y, "input_0.reduce_log_sum_exp(0, true)", name) - reduce_log_sum_exp_export_do_not_keepdims() - reduce_log_sum_exp_export_keepdims() - reduce_log_sum_exp_axis_0() +# reduce_log_sum_exp_export_do_not_keepdims() +# reduce_log_sum_exp_export_keepdims() +# reduce_log_sum_exp_axis_0() + + diff --git a/nodegen/node/reduce_prod.py b/nodegen/node/reduce_prod.py deleted file mode 100644 index 7d145bae1..000000000 --- a/nodegen/node/reduce_prod.py +++ /dev/null @@ -1,287 +0,0 @@ -import numpy as np -from nodegen.node import RunAll -from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl - - -class Reduce_prod(RunAll): - @staticmethod - def reduce_prod_u32(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.uint32) - y = np.array([0]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 
3]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 3]).astype(np.uint32).reshape(1, 2) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([0, 6]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_prod_u32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_i32(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int32) - y = np.array([0]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 3]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 3]).astype(np.int32).reshape(1, 2) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([0, 6]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_prod_i32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_i8(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int8) - y = np.array([0]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 3]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 3]).astype(np.int8).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_prod_i8_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([0, 6]).astype(np.int8) - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, 
y.flatten()) - - name = "reduce_prod_i8_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_fp8x23(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([0]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 6]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_prod_fp8x23_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_prod_1D() - reduce_prod_2D() - - @staticmethod - def reduce_prod_fp16x16(): - def reduce_prod_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([0]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_1D" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def reduce_prod_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_default" - make_test( - [x], y, "input_0.reduce_prod(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 3]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_keepdims" - make_test( - [x], y, "input_0.reduce_prod(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([0, 6]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_prod_fp16x16_2D_axis_1" - make_test( - [x], y, "input_0.reduce_prod(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_prod_1D() - reduce_prod_2D() diff --git 
a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index eb50a2e14..051965260 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -408,12 +408,8 @@ impl TreeEnsembleClassifierImpl< let mut class_id: usize = 0; // Get first class_id in class_ids match class_ids.pop_front() { - Option::Some(c_id) => { - let mut class_id = *c_id; - }, - Option::None(_) => { - let mut class_id: usize = 0; - } + Option::Some(c_id) => { let mut class_id = *c_id; }, + Option::None(_) => { let mut class_id: usize = 0; } }; loop { if i == self.class_ids.len() { @@ -424,19 +420,17 @@ impl TreeEnsembleClassifierImpl< if *c_id == class_id { binary = true; continue; - }else{ + } else { binary = false; break; } - }, Option::None(_) => { break; } }; - }; // Clone res - if binary{ + if binary { let mut new_res: MutMatrix = MutMatrixImpl::new(res.rows, res.cols); let mut i: usize = 0; loop { @@ -445,14 +439,10 @@ impl TreeEnsembleClassifierImpl< } // Exchange let res_ele_1 = match res.get(i, 0) { - Option::Some(res_0) => { - new_res.set(i, 1, res_0); - }, - Option::None(_) => { - new_res.set(i, 1, NumberTrait::zero()); - }, + Option::Some(res_0) => { new_res.set(i, 1, res_0); }, + Option::None(_) => { new_res.set(i, 1, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; match self.post_transform { POST_TRANSFORM::NONE => { @@ -467,11 +457,9 @@ impl TreeEnsembleClassifierImpl< let value = NumberTrait::sub(NumberTrait::one(), res_1); new_res.set(i, 0, value); }, - Option::None(_) => { - new_res.set(i, 0, NumberTrait::zero()); - }, + Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; }, POST_TRANSFORM::SOFTMAX => { @@ -482,14 +470,10 @@ impl TreeEnsembleClassifierImpl< } // Exchange let res_ele_0 = match new_res.get(i, 1) { - Option::Some(res_1) => { - new_res.set(i, 0, res_1.neg()); - }, - Option::None(_) => { - new_res.set(i, 0, NumberTrait::zero()); - }, + Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, + Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; }, POST_TRANSFORM::LOGISTIC => { @@ -500,14 +484,10 @@ impl TreeEnsembleClassifierImpl< } // Exchange let res_ele_0 = match new_res.get(i, 1) { - Option::Some(res_1) => { - new_res.set(i, 0, res_1.neg()); - }, - Option::None(_) => { - new_res.set(i, 0, NumberTrait::zero()); - }, + Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, + Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; }, POST_TRANSFORM::SOFTMAXZERO => { @@ -518,14 +498,10 @@ impl TreeEnsembleClassifierImpl< } // Exchange let res_ele_0 = match new_res.get(i, 1) { - Option::Some(res_1) => { - new_res.set(i, 0, res_1.neg()); - }, - Option::None(_) => { - new_res.set(i, 0, NumberTrait::zero()); - }, + Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, + Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; }, POST_TRANSFORM::PROBIT => { @@ -540,17 +516,15 @@ impl TreeEnsembleClassifierImpl< let value = NumberTrait::sub(NumberTrait::one(), res_1); new_res.set(i, 0, value); }, - Option::None(_) => { - new_res.set(i, 0, NumberTrait::zero()); - }, + Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, }; - i+=1; + i += 1; }; }, }; res = new_res; } - + // Post Transform let mut new_scores = match self.post_transform { POST_TRANSFORM::NONE => res, // No action required 
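For reference, the binary branch refactored above turns the single positive-class score into a two-column score matrix before the post transform runs: the score is copied into column 1, and column 0 is filled with `1 - score` for NONE and PROBIT, or with the negated score for SOFTMAX, LOGISTIC and SOFTMAXZERO (where the transform is applied afterwards). A minimal NumPy sketch of that expansion, assuming float inputs; the function name and dtype are illustrative, not part of the Cairo code:

```python
import numpy as np

def expand_binary_scores(pos_scores, post_transform):
    # Sketch of the binary-class expansion in the hunk above:
    # column 1 keeps the raw positive-class score.
    s = np.asarray(pos_scores, dtype=np.float64).reshape(-1, 1)
    if post_transform in ("NONE", "PROBIT"):
        col0 = 1.0 - s   # complement: scores are already probabilities
    else:  # SOFTMAX, LOGISTIC, SOFTMAXZERO: transform is applied later
        col0 = -s        # negate, so e.g. softmax([-s, s]) sums to 1
    return np.hstack([col0, s])

# expand_binary_scores([0.25, 0.75], "NONE") -> [[0.75, 0.25], [0.25, 0.75]]
```
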
diff --git a/src/operators/sequence/functional.cairo b/src/operators/sequence/functional.cairo
index 84f30cfc7..e0b80db7c 100644
--- a/src/operators/sequence/functional.cairo
+++ b/src/operators/sequence/functional.cairo
@@ -4,4 +4,4 @@ mod sequence_at;
 mod sequence_erase;
 mod sequence_insert;
 mod sequence_length;
-mod concat_from_sequence;
\ No newline at end of file
+mod concat_from_sequence;
diff --git a/src/operators/sequence/implementations/sequence_bool.cairo b/src/operators/sequence/implementations/sequence_bool.cairo
index 1ac241e41..b9d800123 100644
--- a/src/operators/sequence/implementations/sequence_bool.cairo
+++ b/src/operators/sequence/implementations/sequence_bool.cairo
@@ -41,5 +41,4 @@ impl BoolSequence of SequenceTrait<bool> {
     ) -> Tensor<bool> {
         functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
     }
-
 }
diff --git a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo
index eaebb072d..64bb5576f 100644
--- a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo
+++ b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo
@@ -43,5 +43,4 @@ impl FP8x23WSequence of SequenceTrait<FP8x23W> {
     ) -> Tensor<FP8x23W> {
         functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
     }
-
 }
diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo
index 4e4df4c1b..540389c62 100644
--- a/src/operators/tensor/core.cairo
+++ b/src/operators/tensor/core.cairo
@@ -4638,7 +4638,7 @@ trait TensorTrait<T> {
     ///
     /// ```
-    fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
+    fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
     /// ## tensor.erf
     ///
     /// ```rust
diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo
index 6c90de1aa..894dfc8d4 100644
--- a/src/operators/tensor/helpers.cairo
+++ b/src/operators/tensor/helpers.cairo
@@ -52,7 +52,7 @@ fn check_shape<T>(shape: Span<usize>, data: Span<T>) {
 /// * Panics if the shapes are not compatible for broadcasting.
fn check_compatibility(mut shape_1: Span, mut shape_2: Span) { assert(shape_1.len() == shape_2.len(), 'tensors shape must match'); - + loop { match shape_1.pop_front() { Option::Some(shape_1_val) => { diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 9a577c1c9..ff5493f99 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -448,7 +448,9 @@ impl BoolTensor of TensorTrait { math::gather_nd::gather_nd(self, indices, batch_dims) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index eb27c7963..1ff129e03 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -472,11 +472,15 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } - fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + fn reduce_log_sum_exp( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) } } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 9a8f0735b..e04d462dc 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -489,7 +489,9 @@ impl FP16x16Tensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -498,7 +500,7 @@ impl FP16x16Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + panic(array!['not supported!']) } @@ -512,7 +514,9 @@ impl FP16x16Tensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 1baabb74e..0f1f29c7e 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -459,7 +459,9 @@ impl FP16x16WTensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -467,8 +469,10 @@ impl FP16x16WTensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } - fn 
reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + fn reduce_log_sum_exp( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + panic(array!['not supported!']) } fn erf(self: @Tensor) -> Tensor { @@ -481,7 +485,9 @@ impl FP16x16WTensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 78fdb898d..fcb18c626 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -487,7 +487,9 @@ impl FP32x32Tensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -509,7 +511,9 @@ impl FP32x32Tensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 90bc29d89..370b719ad 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -487,7 +487,9 @@ impl FP64x64Tensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -509,7 +511,9 @@ impl FP64x64Tensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 38d6622f9..8f816c469 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -491,7 +491,9 @@ impl FP8x23Tensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -500,7 +502,7 @@ impl FP8x23Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + panic(array!['not supported!']) } fn erf(self: @Tensor) -> Tensor { @@ -513,7 +515,9 @@ impl FP8x23Tensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: 
Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index af722d118..c375aa1c0 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -441,7 +441,9 @@ impl FP8x23WTensor of TensorTrait { math::is_nan::is_nan(self) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -450,7 +452,7 @@ impl FP8x23WTensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp_wide::(self, axis, keepdims) + panic(array!['not supported!']) } fn erf(self: @Tensor) -> Tensor { @@ -463,7 +465,9 @@ impl FP8x23WTensor of TensorTrait { manipulation::unique::unique(self, axis, sorted) } - fn compress(self: @Tensor, condition: Tensor, axis: Option) -> Tensor { + fn compress( + self: @Tensor, condition: Tensor, axis: Option + ) -> Tensor { math::compress::compress(self, condition, axis) } } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 9ee7b4356..b0d90b79d 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -82,7 +82,6 @@ impl I32Tensor of TensorTrait { } - fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -484,7 +483,9 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -493,7 +494,7 @@ impl I32Tensor of TensorTrait { } fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + panic(array!['not supported!']) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index fb7571d40..2a648b007 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -480,7 +480,9 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index b11b180d8..341b7bbc9 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -424,7 +424,9 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_nd(self: @Tensor, indices: Tensor, batch_dims: Option) -> Tensor { + fn gather_nd( + self: @Tensor, indices: Tensor, batch_dims: Option + ) -> Tensor { math::gather_nd::gather_nd(self, indices, batch_dims) } @@ -433,7 +435,7 @@ impl U32Tensor of TensorTrait { } fn 
reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims) + panic(array!['not supported!']) } fn erf(self: @Tensor) -> Tensor { diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index ee601bc9b..55cab7254 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -58,5 +58,5 @@ mod is_inf; mod gather_nd; mod reduce_log_sum; mod erf; -mod reduce_log_sum_exp; +mod reduce_log_sum_exp; mod compress; diff --git a/src/operators/tensor/math/compress.cairo b/src/operators/tensor/math/compress.cairo index 6380d5d15..d22eb1d82 100644 --- a/src/operators/tensor/math/compress.cairo +++ b/src/operators/tensor/math/compress.cairo @@ -14,12 +14,7 @@ use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// Cf: TensorTrait::compare docstring -fn compress< - T, - impl TTensorTrait: TensorTrait, - impl TCopy: Copy, - impl TDrop: Drop, ->( +fn compress, impl TCopy: Copy, impl TDrop: Drop,>( self: @Tensor, condition: Tensor, axis: Option ) -> Tensor { let axis = match axis { @@ -29,7 +24,7 @@ fn compress< let data_rank = (*self.shape).len(); let condition_rank = (condition.shape).len(); - assert((data_rank >= 1 ), 'data rank must > 1'); + assert((data_rank >= 1), 'data rank must > 1'); assert((condition_rank == 1), 'condition rank must be 1'); let mut data_shape = *self.shape; @@ -67,9 +62,7 @@ fn compress< let mut total_shape = 1; loop { match data_shape.pop_front() { - Option::Some(val) => { - total_shape *= *val; - }, + Option::Some(val) => { total_shape *= *val; }, Option::None(_) => { break; } }; }; @@ -78,8 +71,10 @@ fn compress< loop { match condition_data.pop_front() { Option::Some(val) => { - if (ind == total_shape) {break; } - if (*val != 0){ + if (ind == total_shape) { + break; + } + if (*val != 0) { output_data.append(*self.data[ind]); } ind += 1; @@ -99,8 +94,7 @@ fn compress< Option::Some(val) => { if (ind == axis) { output_shape.append(output); - } - else { + } else { output_shape.append(*val); if (ind > axis) { loop_breaker *= *val; @@ -120,31 +114,34 @@ fn compress< let mut ind = 0; let mut ind_loop = 0; - + let mut inner_index: usize = 0; let mut condition_data_clone = condition_data.clone(); loop { - if (ind == other_loop_breaker) {break;} + if (ind == other_loop_breaker) { + break; + } let mut condition_data_clone = condition_data.clone(); - inner_index = *data_shape.at(axis) * ind; + inner_index = *data_shape.at(axis) * ind; loop { - match condition_data_clone.pop_front() { - Option::Some(val) => { - if (*val != 0){ - let result = inner_index * loop_breaker ; - - let mut data_ind:usize = result ; - loop { - if data_ind == result + loop_breaker { break; } - index_data.append(data_ind); - data_ind+=1; - }; - } - inner_index += 1; - }, - Option::None(_) => { break; } + Option::Some(val) => { + if (*val != 0) { + let result = inner_index * loop_breaker; + + let mut data_ind: usize = result; + loop { + if data_ind == result + loop_breaker { + break; + } + index_data.append(data_ind); + data_ind += 1; + }; + } + inner_index += 1; + }, + Option::None(_) => { break; } }; }; @@ -153,14 +150,12 @@ fn compress< loop { match index_data.pop_front() { - Option::Some(val) => { - output_data.append(*self.data[val]); - }, + Option::Some(val) => { output_data.append(*self.data[val]); }, Option::None(_) => { break; } }; - }; + }; } let mut output_tensor = TensorTrait::::new(output_shape.span(), 
output_data.span()); return output_tensor; -} \ No newline at end of file +} diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index 1dae0db41..6bc7da160 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -29,25 +29,24 @@ fn reduce_log_sum_exp_wide< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_exp: Tensor = exp_upcast(*self); let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims); + return tensor_exp_log_sum; } - fn reduce_log_sum_exp< - T, +fn reduce_log_sum_exp< + T, MAG, - impl Tensor: TensorTrait, - impl TNumber: NumberTrait, - impl TMul: Mul, - impl TAddEq: AddEq, - impl TCopy: Copy, + impl Tensor: TensorTrait, + impl TNumber: NumberTrait, + impl TMul: Mul, + impl TAddEq: AddEq, + impl TCopy: Copy, impl TDrop: Drop, - >( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - +>( + self: @Tensor, axis: usize, keepdims: bool +) -> Tensor { let tensor_exp = self.exp(); let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis: axis, keepdims: keepdims); return tensor_exp_log_sum; diff --git a/tests/lib.cairo b/tests/lib.cairo index c408347ef..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -5,4 +5,3 @@ mod nodes; mod ml; mod operators; - diff --git a/tests/ml/tree_ensemble_classifier.cairo b/tests/ml/tree_ensemble_classifier.cairo index 6ee2afc11..441aabb34 100644 --- a/tests/ml/tree_ensemble_classifier.cairo +++ b/tests/ml/tree_ensemble_classifier.cairo @@ -241,8 +241,9 @@ fn test_tree_ensemble_classifier_binary_none() { #[test] #[available_gas(200000000000)] fn test_tree_ensemble_classifier_binary_logistic() { - - let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::LOGISTIC); + let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper( + POST_TRANSFORM::LOGISTIC + ); let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); @@ -282,11 +283,13 @@ fn test_tree_ensemble_classifier_binary_softmax() { 'score[0, 1]' ); } - + #[test] #[available_gas(200000000000)] fn test_tree_ensemble_classifier_binary_softmax_zero() { - let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::SOFTMAXZERO); + let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper( + POST_TRANSFORM::SOFTMAXZERO + ); let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); @@ -485,180 +488,1911 @@ fn tree_ensemble_classifier_helper( fn tree_ensemble_classifier_binary_class_helper( post_transform: POST_TRANSFORM ) -> (TreeEnsembleClassifier, Tensor) { - let class_ids: Span = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(); - let class_nodeids: Span = array![4, 5, 7, 10, 12, 13, 15, 17, 19, 20, 24, 26, 29, 31, 32, 33, 37, 38, 39, 40, 46, 49, 50, 52, 56, 57, 58, 59, 62, 64, 66, 67, 68, 73, 74, 75, 76, 81, 82, 83, 84, 88, 89, 91, 93, 94, 95, 98, 99, 101, 104, 106, 107, 108, 112, 113, 114, 115, 119, 121, 124, 125, 127, 128, 130, 131, 138, 140, 141, 142, 143, 148, 149, 150, 151, 152, 153, 154].span(); - let class_treeids: Span = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(); - let class_weights: Span = array![FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 43690, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 65536, sign: false }].span(); + let class_ids: Span = array![ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let class_nodeids: Span = array![ + 4, + 5, + 7, + 10, + 12, + 13, + 15, + 17, + 19, + 20, + 24, + 26, + 29, + 31, + 32, + 33, + 37, + 38, + 39, + 40, + 46, + 49, + 50, + 52, + 56, + 57, + 58, + 59, + 62, + 64, + 66, + 67, + 68, + 73, + 74, + 75, + 76, + 81, + 82, + 83, 
+ 84, + 88, + 89, + 91, + 93, + 94, + 95, + 98, + 99, + 101, + 104, + 106, + 107, + 108, + 112, + 113, + 114, + 115, + 119, + 121, + 124, + 125, + 127, + 128, + 130, + 131, + 138, + 140, + 141, + 142, + 143, + 148, + 149, + 150, + 151, + 152, + 153, + 154 + ] + .span(); + let class_treeids: Span = array![ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let class_weights: Span = array![ + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 43690, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 65536, sign: false }, + 
FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false } + ] + .span(); let classlabels: Span = array![0, 1].span(); - let nodes_falsenodeids: Span = array![116, 21, 6, 5, 0, 0, 8, 0, 14, 11, 0, 13, 0, 0, 16, 0, 18, 0, 20, 0, 0, 41, 34, 25, 0, 27, 0, 33, 30, 0, 32, 0, 0, 0, 40, 39, 38, 0, 0, 0, 0, 109, 96, 69, 60, 47, 0, 51, 50, 0, 0, 53, 0, 59, 58, 57, 0, 0, 0, 0, 68, 63, 0, 65, 0, 67, 0, 0, 0, 77, 76, 75, 74, 0, 0, 0, 0, 85, 84, 83, 82, 0, 0, 0, 0, 95, 90, 89, 0, 0, 92, 0, 94, 0, 0, 0, 100, 99, 0, 0, 102, 0, 108, 105, 0, 107, 0, 0, 0, 115, 114, 113, 0, 0, 0, 0, 132, 129, 120, 0, 122, 0, 126, 125, 0, 0, 128, 0, 0, 131, 0, 0, 154, 153, 144, 143, 142, 139, 0, 141, 0, 0, 0, 0, 152, 151, 150, 149, 0, 0, 0, 0, 0, 0, 0].span(); - let nodes_featureids: Span = array![3, 2, 4, 8, 0, 0, 1, 0, 2, 7, 0, 0, 0, 0, 7, 0, 0, 0, 6, 0, 0, 8, 0, 2, 0, 7, 0, 7, 2, 0, 2, 0, 0, 0, 2, 6, 7, 0, 0, 0, 0, 7, 7, 0, 7, 1, 0, 0, 2, 0, 0, 2, 0, 2, 2, 6, 0, 0, 0, 0, 2, 0, 0, 1, 0, 6, 0, 0, 0, 0, 2, 6, 7, 0, 0, 0, 0, 6, 7, 2, 0, 0, 0, 0, 0, 2, 2, 7, 0, 0, 2, 0, 0, 0, 0, 0, 6, 1, 0, 0, 4, 0, 2, 2, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 6, 0, 7, 0, 0, 0, 1, 3, 0, 0, 2, 0, 0, 8, 0, 0, 2, 2, 2, 4, 7, 3, 0, 1, 0, 0, 0, 0, 4, 3, 7, 8, 0, 0, 0, 0, 0, 0, 0].span(); - let nodes_missing_value_tracks_true: Span = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(); - let nodes_modes: Span = array![NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, 
NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::BRANCH_LEQ, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF, NODE_MODES::LEAF].span(); - let nodes_nodeids: Span = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154].span(); - let nodes_treeids: Span = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(); - let nodes_truenodeids: Span = array![1, 2, 3, 4, 0, 0, 7, 0, 9, 10, 0, 12, 0, 0, 15, 0, 17, 0, 19, 0, 0, 22, 23, 24, 0, 26, 0, 28, 29, 0, 31, 0, 0, 0, 35, 36, 37, 0, 0, 0, 0, 42, 43, 44, 45, 46, 0, 48, 49, 0, 0, 52, 0, 54, 55, 56, 0, 0, 0, 0, 61, 62, 0, 64, 0, 66, 0, 0, 0, 70, 71, 72, 73, 0, 0, 0, 0, 78, 79, 80, 81, 0, 0, 0, 0, 86, 87, 88, 0, 0, 91, 0, 93, 0, 0, 0, 97, 98, 0, 0, 101, 0, 103, 104, 0, 106, 0, 0, 0, 110, 111, 112, 0, 0, 0, 0, 117, 118, 119, 0, 121, 0, 123, 124, 0, 0, 127, 0, 0, 130, 0, 0, 133, 134, 135, 136, 137, 138, 0, 140, 0, 0, 0, 0, 145, 146, 147, 148, 0, 0, 0, 0, 0, 0, 0].span(); - let nodes_values: Span = array![FP16x16 { mag: 4096, sign: false }, FP16x16 { mag: 22937, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { 
mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 16384, sign: false }, FP16x16 { mag: 57344, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 19660, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 29491, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 24576, sign: false }, FP16x16 { mag: 42598, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 62259, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 62259, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 40960, sign: false }, FP16x16 { mag: 24576, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 19660, sign: false }, FP16x16 { mag: 45875, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 29491, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 42598, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 36044, sign: false }, FP16x16 { mag: 19660, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 45875, sign: false }, FP16x16 { mag: 29491, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 36044, sign: false }, FP16x16 { mag: 58982, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 58982, sign: false }, FP16x16 { mag: 29491, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 45875, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 58982, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 42598, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, 
FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 45875, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 29491, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 45875, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 36044, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 58982, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 36044, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 16384, sign: false }, FP16x16 { mag: 20480, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 49152, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 8192, sign: false }, FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }, FP16x16 { mag: 0, sign: false }].span(); + let nodes_falsenodeids: Span = array![ + 116, + 21, + 6, + 5, + 0, + 0, + 8, + 0, + 14, + 11, + 0, + 13, + 0, + 0, + 16, + 0, + 18, + 0, + 20, + 0, + 0, + 41, + 34, + 25, + 0, + 27, + 0, + 33, + 30, + 0, + 32, + 0, + 0, + 0, + 40, + 39, + 38, + 0, + 0, + 0, + 0, + 109, + 96, + 69, + 60, + 47, + 0, + 51, + 50, + 0, + 0, + 53, + 0, + 59, + 58, + 57, + 0, + 0, + 0, + 0, + 68, + 63, + 0, + 65, + 0, + 67, + 0, + 0, + 0, + 77, + 76, + 75, + 74, + 0, + 0, + 0, + 0, + 85, + 84, + 83, + 82, + 0, + 0, + 0, + 0, + 95, + 90, + 89, + 0, + 0, + 92, + 0, + 94, + 0, + 0, + 0, + 100, + 99, + 0, + 0, + 102, + 0, + 108, + 105, + 0, + 107, + 0, + 0, + 0, + 115, + 114, + 113, + 0, + 0, + 0, + 0, + 132, + 129, + 120, + 0, + 122, + 0, + 126, + 125, + 0, + 0, + 128, + 0, + 0, + 131, + 0, + 0, + 154, + 153, + 144, + 143, + 142, + 139, + 0, + 141, + 0, + 0, + 0, + 0, + 152, + 151, + 150, + 149, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let nodes_featureids: Span = array![ + 3, + 2, + 4, + 8, + 0, + 0, + 1, + 0, + 2, + 7, + 0, + 0, + 0, + 0, + 7, + 0, + 0, + 0, + 6, + 0, + 0, + 8, + 0, + 2, + 0, + 7, + 0, + 7, + 2, + 0, + 2, + 0, + 0, + 0, + 2, + 6, + 7, + 0, + 0, + 0, + 0, + 7, + 7, + 0, + 7, + 1, + 0, + 0, + 2, + 0, + 0, + 2, + 0, + 2, + 2, + 6, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 1, + 0, + 6, + 0, + 0, + 0, + 0, + 2, + 6, + 7, + 0, + 0, + 0, + 0, + 6, + 7, + 2, + 0, + 0, + 0, + 0, + 0, + 2, + 2, + 7, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 6, + 1, + 0, + 0, + 4, + 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 2, + 0, + 0, + 0, + 0, + 6, + 0, + 7, + 0, + 0, + 0, + 1, + 3, + 0, + 0, + 2, + 0, + 0, + 8, + 0, + 0, + 2, + 2, + 2, + 4, + 7, + 3, + 0, + 1, + 0, + 0, + 0, + 0, + 4, + 3, + 7, + 8, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let nodes_missing_value_tracks_true: Span = array![ + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let nodes_modes: Span = array![ + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + 
NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::BRANCH_LEQ, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF, + NODE_MODES::LEAF + ] + .span(); + let nodes_nodeids: Span = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 66, + 67, + 68, + 69, + 70, + 71, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 83, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + 91, + 92, + 93, + 94, + 95, + 96, + 97, + 98, + 99, + 100, + 101, + 102, + 103, + 104, + 105, + 106, + 107, + 108, + 109, + 110, + 111, + 112, + 113, + 114, + 115, + 116, + 117, + 118, + 119, + 120, + 121, + 122, + 123, + 124, + 125, + 126, + 127, + 128, + 129, + 130, + 131, + 132, + 133, + 134, + 135, + 136, + 137, + 138, + 139, + 140, + 141, + 142, + 143, + 144, + 145, + 146, + 147, + 148, + 149, + 150, + 151, + 152, + 153, + 154 + ] + .span(); + let nodes_treeids: Span = array![ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let nodes_truenodeids: Span = array![ + 1, + 2, + 3, + 4, + 0, + 0, + 7, + 0, + 9, + 10, + 0, + 12, + 0, + 0, + 15, + 0, + 17, + 0, + 19, + 0, + 0, + 22, + 23, + 24, + 0, + 26, + 0, + 28, + 29, + 0, + 31, + 0, + 0, + 0, + 35, + 36, + 37, + 0, + 0, + 0, + 0, + 42, + 43, + 44, + 45, + 46, + 0, + 48, + 49, + 0, + 0, + 52, + 0, + 54, + 55, + 56, + 0, + 0, + 0, + 0, + 61, + 62, + 0, + 64, + 0, + 66, + 0, + 0, + 0, + 70, + 71, + 72, + 73, + 0, + 0, + 0, + 0, + 78, + 79, + 80, + 81, + 0, + 0, + 0, + 0, + 86, + 87, + 88, + 0, + 0, + 91, + 0, + 93, + 0, + 0, + 0, + 97, + 98, + 0, + 0, + 101, + 0, + 103, + 104, + 0, + 106, + 0, + 0, + 0, + 110, + 111, + 112, + 0, + 0, + 0, + 0, + 117, + 118, + 119, + 0, + 121, + 0, + 123, + 124, + 0, + 0, + 127, + 0, + 0, + 130, + 0, + 0, + 133, + 134, + 135, + 136, + 137, + 138, + 0, + 140, + 0, + 0, + 0, + 0, + 145, + 146, + 147, + 148, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + .span(); + let nodes_values: Span = array![ + FP16x16 { mag: 4096, sign: false }, + FP16x16 { mag: 22937, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 16384, sign: false }, + FP16x16 { mag: 57344, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 19660, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 29491, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 24576, sign: false }, + FP16x16 { mag: 42598, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 62259, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 62259, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 40960, sign: false }, + FP16x16 { mag: 24576, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 19660, sign: false }, + FP16x16 { mag: 45875, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 29491, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 42598, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 36044, sign: false }, + FP16x16 { mag: 19660, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 45875, sign: false }, + FP16x16 { mag: 29491, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 36044, sign: false }, + FP16x16 { mag: 58982, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 58982, sign: false }, + FP16x16 { mag: 29491, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 45875, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 58982, sign: false }, + FP16x16 { mag: 0, sign: 
false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 42598, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 45875, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 29491, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 45875, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 36044, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 58982, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 36044, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 16384, sign: false }, + FP16x16 { mag: 20480, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 49152, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 8192, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 0, sign: false } + ] + .span(); let base_values: Option> = Option::None; let tree_ids: Span = array![0].span(); let mut root_index: Felt252Dict = Default::default(); - root_index.insert(0, 0); + root_index.insert(0, 0); let mut node_index: Felt252Dict = Default::default(); - node_index.insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0); - node_index.insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1); - node_index.insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2); - node_index.insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3); - node_index.insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4); - node_index.insert(3344223123784052057366048933846905716067140384361791026153972616805110454637, 5); - node_index.insert(658476905110174425295568215706634733332002869979287079110965040248935650599, 6); - node_index.insert(2836212335642438363012490794290757623813171043187182819737087983331902926990, 7); - node_index.insert(3496601277869056110810900082189273917786762659443522403285387602989271154262, 8); - node_index.insert(1249294489531540970169611621067106471309281870082955806338234725206665112557, 9); - 
node_index.insert(2161697998033672097816961828039488190903838124365465380011173778905747857792, 10); - node_index.insert(1129815197211541481934112806673325772687763881719835256646064516195041515616, 11); - node_index.insert(2592593088135949192377729543480191336537305484235681164569491942155715064163, 12); - node_index.insert(578223957014284909949571568465953382377214912750427143720957054706073492593, 13); - node_index.insert(1645617302026197421098102802983206579163506957138012501615708926120228167528, 14); - node_index.insert(2809438816810155970395166036110536928593305127049404137239671320081144123490, 15); - node_index.insert(2496308528011391755709310159103918074725328650411689040761791240500618770096, 16); - node_index.insert(2003594778587446957576114348312422277631766150749194167061999666337236425714, 17); - node_index.insert(2215681478480673835576618830034726157921200517935329010004363713426342305479, 18); - node_index.insert(3185925835074464079989752015681272863271067691852543168049845807561733691707, 19); - node_index.insert(1207265836470221457484062512091666004839070622130697586496866096347024057755, 20); - node_index.insert(1870230949202979679764944800468118671928852128047695497376875566624821494262, 21); - node_index.insert(618060852536781954395603948693216564334274573299243914053414488061601327758, 22); - node_index.insert(232760707548494477255512699093366059519467428168757247456690480397246371463, 23); - node_index.insert(1617386247965480308136742715422077429967341022950306068917456849194882895900, 24); - node_index.insert(654822874782506608656472905579051041410086644071534146326024101025575400153, 25); - node_index.insert(525638101901638132526332140778087078272370083489998903571807698910013602668, 26); - node_index.insert(3091640181556387972179279087539287892670640556085669903494551919685982442095, 27); - node_index.insert(1425411460578159050163131982087304445715005458700346341117759372943452688022, 28); - node_index.insert(1722933265299553894839124723076027659619615015638971980461286818493531809034, 29); - node_index.insert(3325117385742592388671007840076299062858228097051060057749225651290693960897, 30); - node_index.insert(1869273998012404873272699831805499731567895666937555882116307079956228100456, 31); - node_index.insert(257262395234910825879033951801423835835630270967846664413154594520703929530, 32); - node_index.insert(2891500475385583315757684141371327604925143655360011721762142660942782195029, 33); - node_index.insert(1257459981124043271342269816753070228024611695909553991758648317372015085782, 34); - node_index.insert(3573101724490615587655146760489247477770015274618159524231872921394794809579, 35); - node_index.insert(2951401777594449283985541406642940553317465718696638438535370997641527993378, 36); - node_index.insert(2436860863451320452900512817385686838091627966322316039332239784330434600829, 37); - node_index.insert(3257977356974702770994741663931928753019715185508521958836925918758890988390, 38); - node_index.insert(2741853283805093821434776875305720302351684616683152528499335618682018880592, 39); - node_index.insert(514567459251558911686762246500770717674979116530125263461114578537254680672, 40); - node_index.insert(2119374930171040799805795099091470687208894498354655018353474015395489390434, 41); - node_index.insert(3338470191188327918255138125570464269857839379813971679216902484398948556964, 42); - node_index.insert(2892272281879752543368066497063301979597320550780387266511926397533716561161, 43); - 
node_index.insert(2855312300216814846973137837923466865382642814675378398541743368270404441020, 44); - node_index.insert(3483159989811162048659069774034779954374540681397531094699912464364012442948, 45); - node_index.insert(2987290998320166766043911843685118029159841654368226419198314196237253901671, 46); - node_index.insert(2925128850088180758852255336587985612621894021863350117875677692518888637440, 47); - node_index.insert(2816470536741550741568042622139415760794090671576940833850781679568928363263, 48); - node_index.insert(117504025904364990582663097556885493352655695615775952177872159762046032741, 49); - node_index.insert(2143228410294149239354901612797540167003066966910132278060626241695943498248, 50); - node_index.insert(419311759585766455354017006957403420381614228026953716552023555428752798694, 51); - node_index.insert(3050064038480880151202753004776919876287903442365303272956696507808448797287, 52); - node_index.insert(1385347512411195789080079656286641766866442255046855963092069449745407366357, 53); - node_index.insert(3070310993421490198115289431281422702215620142859327949152517372324361472619, 54); - node_index.insert(2913742884576958969164113782587195202828846527657900496424141449477472273564, 55); - node_index.insert(2093568472535973986606438755824580633177115509557931302974988564932601955239, 56); - node_index.insert(3560543329106347446823281318204312198881533222464682017397248462954529220234, 57); - node_index.insert(2258329791422139736262782239641765930569031761627249090322755566443202104242, 58); - node_index.insert(780147230530856456622774510057100334628735431063744145772648079601317149643, 59); - node_index.insert(2316329094783634722527635915976455864728431870713378530935487247638854220445, 60); - node_index.insert(595942459003356191117553450912822964169058193996898486073017533717706655996, 61); - node_index.insert(468061318535033931711585815055033307297228787991312757359512916260570188285, 62); - node_index.insert(2052204235688624923559873131063770183910134013049526186717275231865702195614, 63); - node_index.insert(1699955311620840869165542755053722387608345658646185648087789689690825797785, 64); - node_index.insert(3374282522812564185678772854203408947562394461702303390331208821006329361123, 65); - node_index.insert(2973169188135795465401576355486514117723575153845438471619715618155257254587, 66); - node_index.insert(1933845760462748501896196912926633344425020928596291295340561855718789280752, 67); - node_index.insert(1400206374308839959676708676217334569580738052049798766556848516900888958934, 68); - node_index.insert(1440488595273849761788031183901254714714513692476890759699232177835922420051, 69); - node_index.insert(1765607197782429306903827944694032984087223086461400721152786273443512274576, 70); - node_index.insert(1081728107764482028110815183657783965582618309560569428049406599883158895762, 71); - node_index.insert(2062101824085365476835789898002802715794623271831111740147610520210138854237, 72); - node_index.insert(2074740322618091900768870458741540994849904300182495465356314088191301853065, 73); - node_index.insert(3258451235037745323160669027918885172565773098482160366154412360890640013860, 74); - node_index.insert(525053653813541387331907730505904505067816165493211829943994988775279102044, 75); - node_index.insert(1899573658331441767985549642643113663505618738939032010935036740376062596854, 76); - node_index.insert(350484224543766923071449868701665032398970313961410080649918872017849315812, 77); - 
node_index.insert(1950842492180490337143378914485176805944281696420768035114335939818602766139, 78); - node_index.insert(1404824782481446239312837894341789608778585592445990662138109764117920511709, 79); - node_index.insert(362836422984951199752185473435750713386745407518736982952373985921347236081, 80); - node_index.insert(946623025367211063265176586824604502073515634531788667777364911179858705558, 81); - node_index.insert(2633163324000277496191816132521100721217797223993064604664039067710591734562, 82); - node_index.insert(1801986104078933931671502775029170829560335045042499367678597186639133610708, 83); - node_index.insert(1420697278439090953165809531316265389371075037014378922361911811337560296928, 84); - node_index.insert(2818913779862691152404893285048164649343019708946413114150419613972391643833, 85); - node_index.insert(2117995436013652728497840885480545729833030913486848118093758726746902541269, 86); - node_index.insert(127751852951361188238686395231851222850913859197429858579312845246901369178, 87); - node_index.insert(2698811633001158191033663638617437313508153976714307643233173949778419312517, 88); - node_index.insert(658388282521842455588914251287531837029259203197178137902217792556456503561, 89); - node_index.insert(1181527093320872098458354979612125149419384756607076935731557552577945926179, 90); - node_index.insert(749436134732178646256740138670151907037714564259781780243747781475007506978, 91); - node_index.insert(139527053159256821789882596124320673637475746672994443968014105962305658551, 92); - node_index.insert(2256264752321707533173578319742847366660740117899562657584919346001438808295, 93); - node_index.insert(1471349294215639651865069312281269029496180149092207674923855978537861742949, 94); - node_index.insert(1599527610774916650758786135513735847459194869088601099692148267264507139422, 95); - node_index.insert(1348925567371118538973078195838174941892601233016661969987842843098656775084, 96); - node_index.insert(3255130909854220350850821724488067913492420563978595271106701962634473840914, 97); - node_index.insert(1098499015810170842401428216621470177488952811780672364884710297364076372943, 98); - node_index.insert(2666902303639302012507119689908308317608522901613536135678723310999647515155, 99); - node_index.insert(907997515879651052705985194221621380802961721264372722705825219340461809200, 100); - node_index.insert(2124360554325144308113106422635485756539471211141315552843423768396084888273, 101); - node_index.insert(3598736440043009208771817410113758019876931018927260161846683440123219507147, 102); - node_index.insert(1237113034722832488580561245188430373504295256910735188987019984096012001931, 103); - node_index.insert(884558344049768836371555446021588200903052780339208951904957349404044037185, 104); - node_index.insert(784280321344489256066716285882203121428790637989919760379274813665427427262, 105); - node_index.insert(3472551952588748711709398308465335743810517871695257916614928877311914574241, 106); - node_index.insert(1579363348100943961344032004617708767155021524242506190674861550786419896732, 107); - node_index.insert(653576968777651719072715499492112313607520878545254037043893560183879857489, 108); - node_index.insert(2633327961579170199842757290989312779085828750765842327985383652720803061926, 109); - node_index.insert(3101204920253220343970782457572784926765600523633379722044614528209389590915, 110); - node_index.insert(2537565394330405662800880050062241097694806466900452037378113841155978555645, 111); - 
node_index.insert(306955559655552244989220345789093187601563118591829582730637833945761653350, 112); - node_index.insert(1144065212212058748489308207801098564095305699242880891977316839573431241916, 113); - node_index.insert(3478181491851418723342103101321490659650934149094649769124337426850038155270, 114); - node_index.insert(3419621624676637660673415219086314486713019053519954317586073983685881930356, 115); - node_index.insert(2426908011370291613447136873176769136554489197972200481728552402228021778402, 116); - node_index.insert(1916122042123370178944690083048900704842269230325086549679099089416174875473, 117); - node_index.insert(2057207652658215393591191155928140567561900227203223756539551876829334137660, 118); - node_index.insert(2722034389703601317070746005702467061064354401688341549606678773616189196490, 119); - node_index.insert(1171026027377763359814377926117880688616494219551682642535759838199732407496, 120); - node_index.insert(3507234282031533800397666430789917374211847440333243952151005899337152633413, 121); - node_index.insert(591003147462937848375161803108517142253138969543815135207326321181858185919, 122); - node_index.insert(182069734527202013451813026473135702900640769187641767871411473365447302169, 123); - node_index.insert(1195243682249232878341146428166676460720423167409013083888435705219134747702, 124); - node_index.insert(1793425644853312386902998134061844248823841892125424765064687913085130719534, 125); - node_index.insert(1983622665815164792580256365519803214027269990384198703315493315153573288434, 126); - node_index.insert(3615973154491344159350153395208055142342062736505558158666764642048838175685, 127); - node_index.insert(2751715913626909804252433699602081411293721754810298670422380863932998088133, 128); - node_index.insert(186918881712189523740089713555196200069231794627360499557319265374750577226, 129); - node_index.insert(696585542544434929491503209053317581175146475161262066468664234437983008675, 130); - node_index.insert(4359830495913805154545225899592517767672472055784183911796827820518038513, 131); - node_index.insert(2954335207058000607751727656601539819316106074875304820535376873121805433820, 132); - node_index.insert(2510390039949230255082316953804013731253145558531652907601250263563528226672, 133); - node_index.insert(3226995230854300551967642178527450300960499043510855212238369890580256668532, 134); - node_index.insert(1620924075233065517364532267959798304439946408626316544761884056227131075831, 135); - node_index.insert(1610900122192929153657761847202689179268074338802437933866337242354758101660, 136); - node_index.insert(2565949095169598991903537465065584077778440646580025930326495506484329892725, 137); - node_index.insert(1012362975819634411571869839734809106575285344002573666983595104659295812607, 138); - node_index.insert(242312010918799555845832460483650516749990744287009628468613253461264531026, 139); - node_index.insert(1104776796569046483584574115975216172161469015460244982207905888870418040487, 140); - node_index.insert(3289555912992777681578950209252840071327866822704829766247386311885634446673, 141); - node_index.insert(3133389957643610781371406448279843175887428913359743769920083259111437722268, 142); - node_index.insert(1169918710119352022244140656086831769713178729571654411898266328562003734517, 143); - node_index.insert(3592039235252149652556167686570045881877115549259769455422056097903987237819, 144); - node_index.insert(2048175709145840597887667330964815895803568760936075562647625937161113445908, 145); - 
node_index.insert(602222645962845554276438041138511866776339653340605661136009451417275008940, 146); - node_index.insert(3318742320906017551291978242369663702298606650330380959683585594592748661010, 147); - node_index.insert(564160996724923690963741657975239836484028160385417016805513722318839327322, 148); - node_index.insert(656294390376267384135628810815504467149264887388377312825033341338166573620, 149); - node_index.insert(1201592236750942207412694706123654466634588634474700675083122904145559965915, 150); - node_index.insert(2141408926815137181004274624388915700231991905288681935478972043994347966006, 151); - node_index.insert(1440847977042239464860406726605567303568767649154338464116083965986084755262, 152); - node_index.insert(950585553138591375958592507876257987416844837045084288783892644487908218679, 153); - node_index.insert(257643451533833048856069434258149588745628261389615631070776723485957908127, 154); + node_index + .insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0); + node_index + .insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1); + node_index + .insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2); + node_index + .insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3); + node_index + .insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4); + node_index + .insert(3344223123784052057366048933846905716067140384361791026153972616805110454637, 5); + node_index + .insert(658476905110174425295568215706634733332002869979287079110965040248935650599, 6); + node_index + .insert(2836212335642438363012490794290757623813171043187182819737087983331902926990, 7); + node_index + .insert(3496601277869056110810900082189273917786762659443522403285387602989271154262, 8); + node_index + .insert(1249294489531540970169611621067106471309281870082955806338234725206665112557, 9); + node_index + .insert(2161697998033672097816961828039488190903838124365465380011173778905747857792, 10); + node_index + .insert(1129815197211541481934112806673325772687763881719835256646064516195041515616, 11); + node_index + .insert(2592593088135949192377729543480191336537305484235681164569491942155715064163, 12); + node_index + .insert(578223957014284909949571568465953382377214912750427143720957054706073492593, 13); + node_index + .insert(1645617302026197421098102802983206579163506957138012501615708926120228167528, 14); + node_index + .insert(2809438816810155970395166036110536928593305127049404137239671320081144123490, 15); + node_index + .insert(2496308528011391755709310159103918074725328650411689040761791240500618770096, 16); + node_index + .insert(2003594778587446957576114348312422277631766150749194167061999666337236425714, 17); + node_index + .insert(2215681478480673835576618830034726157921200517935329010004363713426342305479, 18); + node_index + .insert(3185925835074464079989752015681272863271067691852543168049845807561733691707, 19); + node_index + .insert(1207265836470221457484062512091666004839070622130697586496866096347024057755, 20); + node_index + .insert(1870230949202979679764944800468118671928852128047695497376875566624821494262, 21); + node_index + .insert(618060852536781954395603948693216564334274573299243914053414488061601327758, 22); + node_index + .insert(232760707548494477255512699093366059519467428168757247456690480397246371463, 23); + node_index + 
.insert(1617386247965480308136742715422077429967341022950306068917456849194882895900, 24); + node_index + .insert(654822874782506608656472905579051041410086644071534146326024101025575400153, 25); + node_index + .insert(525638101901638132526332140778087078272370083489998903571807698910013602668, 26); + node_index + .insert(3091640181556387972179279087539287892670640556085669903494551919685982442095, 27); + node_index + .insert(1425411460578159050163131982087304445715005458700346341117759372943452688022, 28); + node_index + .insert(1722933265299553894839124723076027659619615015638971980461286818493531809034, 29); + node_index + .insert(3325117385742592388671007840076299062858228097051060057749225651290693960897, 30); + node_index + .insert(1869273998012404873272699831805499731567895666937555882116307079956228100456, 31); + node_index + .insert(257262395234910825879033951801423835835630270967846664413154594520703929530, 32); + node_index + .insert(2891500475385583315757684141371327604925143655360011721762142660942782195029, 33); + node_index + .insert(1257459981124043271342269816753070228024611695909553991758648317372015085782, 34); + node_index + .insert(3573101724490615587655146760489247477770015274618159524231872921394794809579, 35); + node_index + .insert(2951401777594449283985541406642940553317465718696638438535370997641527993378, 36); + node_index + .insert(2436860863451320452900512817385686838091627966322316039332239784330434600829, 37); + node_index + .insert(3257977356974702770994741663931928753019715185508521958836925918758890988390, 38); + node_index + .insert(2741853283805093821434776875305720302351684616683152528499335618682018880592, 39); + node_index + .insert(514567459251558911686762246500770717674979116530125263461114578537254680672, 40); + node_index + .insert(2119374930171040799805795099091470687208894498354655018353474015395489390434, 41); + node_index + .insert(3338470191188327918255138125570464269857839379813971679216902484398948556964, 42); + node_index + .insert(2892272281879752543368066497063301979597320550780387266511926397533716561161, 43); + node_index + .insert(2855312300216814846973137837923466865382642814675378398541743368270404441020, 44); + node_index + .insert(3483159989811162048659069774034779954374540681397531094699912464364012442948, 45); + node_index + .insert(2987290998320166766043911843685118029159841654368226419198314196237253901671, 46); + node_index + .insert(2925128850088180758852255336587985612621894021863350117875677692518888637440, 47); + node_index + .insert(2816470536741550741568042622139415760794090671576940833850781679568928363263, 48); + node_index + .insert(117504025904364990582663097556885493352655695615775952177872159762046032741, 49); + node_index + .insert(2143228410294149239354901612797540167003066966910132278060626241695943498248, 50); + node_index + .insert(419311759585766455354017006957403420381614228026953716552023555428752798694, 51); + node_index + .insert(3050064038480880151202753004776919876287903442365303272956696507808448797287, 52); + node_index + .insert(1385347512411195789080079656286641766866442255046855963092069449745407366357, 53); + node_index + .insert(3070310993421490198115289431281422702215620142859327949152517372324361472619, 54); + node_index + .insert(2913742884576958969164113782587195202828846527657900496424141449477472273564, 55); + node_index + .insert(2093568472535973986606438755824580633177115509557931302974988564932601955239, 56); + node_index + 
.insert(3560543329106347446823281318204312198881533222464682017397248462954529220234, 57); + node_index + .insert(2258329791422139736262782239641765930569031761627249090322755566443202104242, 58); + node_index + .insert(780147230530856456622774510057100334628735431063744145772648079601317149643, 59); + node_index + .insert(2316329094783634722527635915976455864728431870713378530935487247638854220445, 60); + node_index + .insert(595942459003356191117553450912822964169058193996898486073017533717706655996, 61); + node_index + .insert(468061318535033931711585815055033307297228787991312757359512916260570188285, 62); + node_index + .insert(2052204235688624923559873131063770183910134013049526186717275231865702195614, 63); + node_index + .insert(1699955311620840869165542755053722387608345658646185648087789689690825797785, 64); + node_index + .insert(3374282522812564185678772854203408947562394461702303390331208821006329361123, 65); + node_index + .insert(2973169188135795465401576355486514117723575153845438471619715618155257254587, 66); + node_index + .insert(1933845760462748501896196912926633344425020928596291295340561855718789280752, 67); + node_index + .insert(1400206374308839959676708676217334569580738052049798766556848516900888958934, 68); + node_index + .insert(1440488595273849761788031183901254714714513692476890759699232177835922420051, 69); + node_index + .insert(1765607197782429306903827944694032984087223086461400721152786273443512274576, 70); + node_index + .insert(1081728107764482028110815183657783965582618309560569428049406599883158895762, 71); + node_index + .insert(2062101824085365476835789898002802715794623271831111740147610520210138854237, 72); + node_index + .insert(2074740322618091900768870458741540994849904300182495465356314088191301853065, 73); + node_index + .insert(3258451235037745323160669027918885172565773098482160366154412360890640013860, 74); + node_index + .insert(525053653813541387331907730505904505067816165493211829943994988775279102044, 75); + node_index + .insert(1899573658331441767985549642643113663505618738939032010935036740376062596854, 76); + node_index + .insert(350484224543766923071449868701665032398970313961410080649918872017849315812, 77); + node_index + .insert(1950842492180490337143378914485176805944281696420768035114335939818602766139, 78); + node_index + .insert(1404824782481446239312837894341789608778585592445990662138109764117920511709, 79); + node_index + .insert(362836422984951199752185473435750713386745407518736982952373985921347236081, 80); + node_index + .insert(946623025367211063265176586824604502073515634531788667777364911179858705558, 81); + node_index + .insert(2633163324000277496191816132521100721217797223993064604664039067710591734562, 82); + node_index + .insert(1801986104078933931671502775029170829560335045042499367678597186639133610708, 83); + node_index + .insert(1420697278439090953165809531316265389371075037014378922361911811337560296928, 84); + node_index + .insert(2818913779862691152404893285048164649343019708946413114150419613972391643833, 85); + node_index + .insert(2117995436013652728497840885480545729833030913486848118093758726746902541269, 86); + node_index + .insert(127751852951361188238686395231851222850913859197429858579312845246901369178, 87); + node_index + .insert(2698811633001158191033663638617437313508153976714307643233173949778419312517, 88); + node_index + .insert(658388282521842455588914251287531837029259203197178137902217792556456503561, 89); + node_index + 
.insert(1181527093320872098458354979612125149419384756607076935731557552577945926179, 90); + node_index + .insert(749436134732178646256740138670151907037714564259781780243747781475007506978, 91); + node_index + .insert(139527053159256821789882596124320673637475746672994443968014105962305658551, 92); + node_index + .insert(2256264752321707533173578319742847366660740117899562657584919346001438808295, 93); + node_index + .insert(1471349294215639651865069312281269029496180149092207674923855978537861742949, 94); + node_index + .insert(1599527610774916650758786135513735847459194869088601099692148267264507139422, 95); + node_index + .insert(1348925567371118538973078195838174941892601233016661969987842843098656775084, 96); + node_index + .insert(3255130909854220350850821724488067913492420563978595271106701962634473840914, 97); + node_index + .insert(1098499015810170842401428216621470177488952811780672364884710297364076372943, 98); + node_index + .insert(2666902303639302012507119689908308317608522901613536135678723310999647515155, 99); + node_index + .insert(907997515879651052705985194221621380802961721264372722705825219340461809200, 100); + node_index + .insert(2124360554325144308113106422635485756539471211141315552843423768396084888273, 101); + node_index + .insert(3598736440043009208771817410113758019876931018927260161846683440123219507147, 102); + node_index + .insert(1237113034722832488580561245188430373504295256910735188987019984096012001931, 103); + node_index + .insert(884558344049768836371555446021588200903052780339208951904957349404044037185, 104); + node_index + .insert(784280321344489256066716285882203121428790637989919760379274813665427427262, 105); + node_index + .insert(3472551952588748711709398308465335743810517871695257916614928877311914574241, 106); + node_index + .insert(1579363348100943961344032004617708767155021524242506190674861550786419896732, 107); + node_index + .insert(653576968777651719072715499492112313607520878545254037043893560183879857489, 108); + node_index + .insert(2633327961579170199842757290989312779085828750765842327985383652720803061926, 109); + node_index + .insert(3101204920253220343970782457572784926765600523633379722044614528209389590915, 110); + node_index + .insert(2537565394330405662800880050062241097694806466900452037378113841155978555645, 111); + node_index + .insert(306955559655552244989220345789093187601563118591829582730637833945761653350, 112); + node_index + .insert(1144065212212058748489308207801098564095305699242880891977316839573431241916, 113); + node_index + .insert(3478181491851418723342103101321490659650934149094649769124337426850038155270, 114); + node_index + .insert(3419621624676637660673415219086314486713019053519954317586073983685881930356, 115); + node_index + .insert(2426908011370291613447136873176769136554489197972200481728552402228021778402, 116); + node_index + .insert(1916122042123370178944690083048900704842269230325086549679099089416174875473, 117); + node_index + .insert(2057207652658215393591191155928140567561900227203223756539551876829334137660, 118); + node_index + .insert(2722034389703601317070746005702467061064354401688341549606678773616189196490, 119); + node_index + .insert(1171026027377763359814377926117880688616494219551682642535759838199732407496, 120); + node_index + .insert(3507234282031533800397666430789917374211847440333243952151005899337152633413, 121); + node_index + .insert(591003147462937848375161803108517142253138969543815135207326321181858185919, 122); + node_index + 
.insert(182069734527202013451813026473135702900640769187641767871411473365447302169, 123); + node_index + .insert(1195243682249232878341146428166676460720423167409013083888435705219134747702, 124); + node_index + .insert(1793425644853312386902998134061844248823841892125424765064687913085130719534, 125); + node_index + .insert(1983622665815164792580256365519803214027269990384198703315493315153573288434, 126); + node_index + .insert(3615973154491344159350153395208055142342062736505558158666764642048838175685, 127); + node_index + .insert(2751715913626909804252433699602081411293721754810298670422380863932998088133, 128); + node_index + .insert(186918881712189523740089713555196200069231794627360499557319265374750577226, 129); + node_index + .insert(696585542544434929491503209053317581175146475161262066468664234437983008675, 130); + node_index + .insert(4359830495913805154545225899592517767672472055784183911796827820518038513, 131); + node_index + .insert(2954335207058000607751727656601539819316106074875304820535376873121805433820, 132); + node_index + .insert(2510390039949230255082316953804013731253145558531652907601250263563528226672, 133); + node_index + .insert(3226995230854300551967642178527450300960499043510855212238369890580256668532, 134); + node_index + .insert(1620924075233065517364532267959798304439946408626316544761884056227131075831, 135); + node_index + .insert(1610900122192929153657761847202689179268074338802437933866337242354758101660, 136); + node_index + .insert(2565949095169598991903537465065584077778440646580025930326495506484329892725, 137); + node_index + .insert(1012362975819634411571869839734809106575285344002573666983595104659295812607, 138); + node_index + .insert(242312010918799555845832460483650516749990744287009628468613253461264531026, 139); + node_index + .insert(1104776796569046483584574115975216172161469015460244982207905888870418040487, 140); + node_index + .insert(3289555912992777681578950209252840071327866822704829766247386311885634446673, 141); + node_index + .insert(3133389957643610781371406448279843175887428913359743769920083259111437722268, 142); + node_index + .insert(1169918710119352022244140656086831769713178729571654411898266328562003734517, 143); + node_index + .insert(3592039235252149652556167686570045881877115549259769455422056097903987237819, 144); + node_index + .insert(2048175709145840597887667330964815895803568760936075562647625937161113445908, 145); + node_index + .insert(602222645962845554276438041138511866776339653340605661136009451417275008940, 146); + node_index + .insert(3318742320906017551291978242369663702298606650330380959683585594592748661010, 147); + node_index + .insert(564160996724923690963741657975239836484028160385417016805513722318839327322, 148); + node_index + .insert(656294390376267384135628810815504467149264887388377312825033341338166573620, 149); + node_index + .insert(1201592236750942207412694706123654466634588634474700675083122904145559965915, 150); + node_index + .insert(2141408926815137181004274624388915700231991905288681935478972043994347966006, 151); + node_index + .insert(1440847977042239464860406726605567303568767649154338464116083965986084755262, 152); + node_index + .insert(950585553138591375958592507876257987416844837045084288783892644487908218679, 153); + node_index + .insert(257643451533833048856069434258149588745628261389615631070776723485957908127, 154); let atts = TreeEnsembleAttributes { nodes_falsenodeids, @@ -687,19 +2421,20 @@ fn tree_ensemble_classifier_binary_class_helper( }; let mut X = 
TensorTrait::new( - array![1,9].span(), - array![ - FP16x16 { mag: 39321, sign: false }, - FP16x16 { mag: 32768, sign: false }, - FP16x16 { mag: 52428, sign: false }, - FP16x16 { mag: 16384, sign: false }, - FP16x16 { mag: 0, sign: false }, - FP16x16 { mag: 65536, sign: false }, - FP16x16 { mag: 0, sign: false }, - FP16x16 { mag: 16384, sign: false }, - FP16x16 { mag: 0, sign: false }, - ].span() - ); + array![1, 9].span(), + array![ + FP16x16 { mag: 39321, sign: false }, + FP16x16 { mag: 32768, sign: false }, + FP16x16 { mag: 52428, sign: false }, + FP16x16 { mag: 16384, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 16384, sign: false }, + FP16x16 { mag: 0, sign: false }, + ] + .span() + ); (classifier, X) -} \ No newline at end of file +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index e829e085f..b346347b5 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -836,12 +836,6 @@ mod unique_u32_with_axis_zero_sorted; mod unique_u32_with_axis_zero_not_sorted; mod unique_u32_with_axis_one_sorted; mod unique_u32_with_axis_one_not_sorted; -mod reduce_log_sum_exp_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_exp_fp16x16_export_keepdims; -mod reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims; -mod reduce_log_sum_exp_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_exp_fp8x23_export_keepdims; -mod reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims; mod gather_nd_fp16x16_3d_default; mod gather_nd_fp16x16_3d_batch_dims1; mod gather_nd_fp16x16_3d_batch_dims2; @@ -875,3 +869,4 @@ mod compress_u32_3d_axis1; mod compress_u32_3d_axis2; mod compress_u32_3d_axis2_2; mod compress_u32_3d_axis3; + diff --git a/tests/nodes/compress_fp16x16_3d_axis1.cairo b/tests/nodes/compress_fp16x16_3d_axis1.cairo index de0c173ed..f110fd66d 100644 --- a/tests/nodes/compress_fp16x16_3d_axis1.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis2.cairo b/tests/nodes/compress_fp16x16_3d_axis2.cairo index 765bcb5ea..1115fb557 100644 --- a/tests/nodes/compress_fp16x16_3d_axis2.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis3.cairo b/tests/nodes/compress_fp16x16_3d_axis3.cairo index ffa9c8321..76ef5f641 100644 --- a/tests/nodes/compress_fp16x16_3d_axis3.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis3.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_default.cairo b/tests/nodes/compress_fp16x16_3d_default.cairo index d9b837a19..aff1849e2 100644 --- a/tests/nodes/compress_fp16x16_3d_default.cairo +++ 
b/tests/nodes/compress_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_noaxis.cairo b/tests/nodes/compress_fp16x16_3d_noaxis.cairo index 2bd536e08..3c9645b1d 100644 --- a/tests/nodes/compress_fp16x16_3d_noaxis.cairo +++ b/tests/nodes/compress_fp16x16_3d_noaxis.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_noaxis() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::None(())); + let y_0 = input_0.compress(condition: input_1, axis: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis1.cairo b/tests/nodes/compress_fp8x23_3d_axis1.cairo index edd013f54..f7edfd13a 100644 --- a/tests/nodes/compress_fp8x23_3d_axis1.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis2.cairo b/tests/nodes/compress_fp8x23_3d_axis2.cairo index 580a6272a..369ffb8bf 100644 --- a/tests/nodes/compress_fp8x23_3d_axis2.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_default.cairo b/tests/nodes/compress_fp8x23_3d_default.cairo index a927f7fe8..eab9aa1ac 100644 --- a/tests/nodes/compress_fp8x23_3d_default.cairo +++ b/tests/nodes/compress_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis1.cairo b/tests/nodes/compress_i32_3d_axis1.cairo index f69cf2e2a..571e5beb5 100644 --- a/tests/nodes/compress_i32_3d_axis1.cairo +++ b/tests/nodes/compress_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis2.cairo b/tests/nodes/compress_i32_3d_axis2.cairo index bfe01e5a0..be674ffba 100644 --- a/tests/nodes/compress_i32_3d_axis2.cairo +++ b/tests/nodes/compress_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_default.cairo 
b/tests/nodes/compress_i32_3d_default.cairo index b07d95010..4bd05fce1 100644 --- a/tests/nodes/compress_i32_3d_default.cairo +++ b/tests/nodes/compress_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis1.cairo b/tests/nodes/compress_i8_3d_axis1.cairo index 6a4197ce1..fae6c2356 100644 --- a/tests/nodes/compress_i8_3d_axis1.cairo +++ b/tests/nodes/compress_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis2.cairo b/tests/nodes/compress_i8_3d_axis2.cairo index 4dd7b5a8f..f8e90c133 100644 --- a/tests/nodes/compress_i8_3d_axis2.cairo +++ b/tests/nodes/compress_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_default.cairo b/tests/nodes/compress_i8_3d_default.cairo index 14b684377..1b4052d0e 100644 --- a/tests/nodes/compress_i8_3d_default.cairo +++ b/tests/nodes/compress_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis1.cairo b/tests/nodes/compress_u32_3d_axis1.cairo index dda59bead..7cfadc989 100644 --- a/tests/nodes/compress_u32_3d_axis1.cairo +++ b/tests/nodes/compress_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2.cairo b/tests/nodes/compress_u32_3d_axis2.cairo index ba8fa77ef..9c70291c5 100644 --- a/tests/nodes/compress_u32_3d_axis2.cairo +++ b/tests/nodes/compress_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2_2.cairo b/tests/nodes/compress_u32_3d_axis2_2.cairo index aa283b2cc..850c10296 100644 --- a/tests/nodes/compress_u32_3d_axis2_2.cairo +++ b/tests/nodes/compress_u32_3d_axis2_2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2_2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis3.cairo 
b/tests/nodes/compress_u32_3d_axis3.cairo index 62684b39f..c53e3e1b1 100644 --- a/tests/nodes/compress_u32_3d_axis3.cairo +++ b/tests/nodes/compress_u32_3d_axis3.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_default.cairo b/tests/nodes/compress_u32_3d_default.cairo index 058750c53..a7d987eb6 100644 --- a/tests/nodes/compress_u32_3d_default.cairo +++ b/tests/nodes/compress_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo deleted file mode 100644 index a3a8e8b52..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16Tensor; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_log_sum_exp_fp16x16_export_do_not_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(2, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo deleted file mode 100644 index 572168299..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - data.append(FP16x16 { mag: 327680, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo deleted file mode 100644 index c7ca08504..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_do_not_keepdims/output_0.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo deleted file mode 100644 index 2f3ff6936..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16Tensor; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_log_sum_exp_fp16x16_export_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(2, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo deleted file mode 100644 index 572168299..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - data.append(FP16x16 { mag: 327680, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo deleted file mode 100644 index d3a56d7df..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_keepdims/output_0.cairo +++ /dev/null @@ -1,20 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, 
sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo deleted file mode 100644 index f7a80713d..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16Tensor; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo deleted file mode 100644 index 572168299..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - data.append(FP16x16 { mag: 327680, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo deleted file mode 100644 index 6dd797b3e..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims/output_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16Tensor; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git 
a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo deleted file mode 100644 index 3a48bad4f..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_log_sum_exp_fp8x23_export_do_not_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(2, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo deleted file mode 100644 index d8f5ac09d..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo deleted file mode 100644 index 72dab7e50..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_do_not_keepdims/output_0.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo deleted file mode 100644 index 488899408..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_log_sum_exp_fp8x23_export_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(2, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo deleted file mode 100644 index d8f5ac09d..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo deleted file mode 100644 index bcb32e2c1..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_keepdims/output_0.cairo +++ /dev/null @@ -1,20 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo deleted file mode 100644 index 90a88770c..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; - -#[test] -#[available_gas(2000000000)] -fn 
test_reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_log_sum_exp(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo deleted file mode 100644 index d8f5ac09d..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo deleted file mode 100644 index f2137c03c..000000000 --- a/tests/nodes/reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims/output_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP8x23Tensor; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} From a1ce3790e0491d1fc45f6c5e3bbdb76ef5674fcb Mon Sep 17 00:00:00 2001 From: Beeyoung <55970530+FriendlyLifeguard@users.noreply.github.com> Date: Tue, 13 Feb 2024 04:07:32 -0800 Subject: [PATCH 14/40] Final --- .../tensor/tensor.reduce_log_sum_exp.md | 81 +++----- nodegen/helpers.py | 16 +- nodegen/node/reduce_log_sum.py | 2 +- nodegen/node/reduce_log_sum_exp.py | 196 +++++------------- src/operators/tensor/core.cairo | 77 +++---- .../tensor/math/reduce_log_sum_exp.cairo | 46 ++-- tests/nodes.cairo | 3 + ...m_exp_fp32x32_export_do_not_keepdims.cairo | 20 ++ .../input_0.cairo | 26 +++ .../output_0.cairo | 19 ++ ..._log_sum_exp_fp32x32_export_keepdims.cairo | 20 ++ .../input_0.cairo | 26 +++ .../output_0.cairo | 20 ++ ...p32x32_export_negative_axes_keepdims.cairo | 20 ++ .../input_0.cairo | 26 +++ .../output_0.cairo | 18 ++ 16 files changed, 353 insertions(+), 263 deletions(-) create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo create mode 
100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo diff --git a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md index b631371d5..8befd8c43 100644 --- a/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md +++ b/docs/framework/operators/tensor/tensor.reduce_log_sum_exp.md @@ -24,52 +24,37 @@ Returns a new `Tensor` instance with the specified axis reduced by summing it ```rust use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - use orion::operators::tensor::FP8x23Tensor; - use orion::numbers::{FixedTrait, FP8x23}; - - fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); - data.append(FixedTrait::new_unscaled(12, false)); - ->>> ( - [[[1, 2] - [3, 4]] - - [[5, 6]] - [7, 8]] - - [[9, 10] - [11, 12]]] - ) - - - let tensor = TensorTrait::new(shape.span(), data.span()) +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn reduce_log_sum_exp() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 4294967296, sign: false }); + data.append(FP32x32 { mag: 8589934592, sign: false }); + data.append(FP32x32 { mag: 12884901888, sign: false }); + data.append(FP32x32 { mag: 17179869184, sign: false }); + data.append(FP32x32 { mag: 21474836480, sign: false }); + data.append(FP32x32 { mag: 25769803776, sign: false }); + data.append(FP32x32 { mag: 30064771072, sign: false }); + data.append(FP32x32 { mag: 34359738368, sign: false }); + data.append(FP32x32 { mag: 38654705664, sign: false }); + data.append(FP32x32 { mag: 42949672960, sign: false }); + data.append(FP32x32 { mag: 47244640256, sign: false }); + data.append(FP32x32 { mag: 51539607552, sign: false }); + TensorTrait::new(shape.span(), data.span()) + + let tensor = TensorTrait::::new(shape.span(), data.span()); + + return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + + } + - return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); -} - ->>> - ( - [[2.31, 4.31] - [6.31, 
8.31] - [10.31, 12.31]] - ) - -``` \ No newline at end of file +>>> [[9215828, 16323477, 20115004], [22716772, 24699744, 26302432]] +``` diff --git a/nodegen/helpers.py b/nodegen/helpers.py index 03fae966c..01300fdb6 100644 --- a/nodegen/helpers.py +++ b/nodegen/helpers.py @@ -10,7 +10,7 @@ class FixedImpl(Enum): FP8x23 = 'FP8x23' FP16x16 = 'FP16x16' - FP64x64 = 'FP64x64' + FP32x32 = 'FP32x32' @@ -20,14 +20,15 @@ def to_fp(x: np.ndarray, fp_impl: FixedImpl): return (x * 2**23).astype(np.int64) case FixedImpl.FP16x16: return (x * 2**16).astype(np.int64) - case FixedImpl.FP64x64: - return (x * 2**64) + case FixedImpl.FP32x32: + return (x * 2**32).astype(np.int64) + class Dtype(Enum): FP8x23 = 'FP8x23' FP16x16 = 'FP16x16' - FP64x64 = 'FP64x64' + FP32x32 = 'FP32x32' I8 = 'i8' I32 = 'i32' U32 = 'u32' @@ -173,8 +174,8 @@ def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]: return ["FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()] case Dtype.FP16x16: return ["FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()] - case Dtype.FP64x64: - return ["FP64x64 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()] + case Dtype.FP32x32: + return ["FP32x32 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()] case Dtype.BOOL: return [str(x).lower() for x in data.flatten()] case Dtype.COMPLEX64: @@ -252,6 +253,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]: Dtype.FP16x16: ["orion::operators::tensor::FP16x16Tensor",], Dtype.BOOL: ["orion::operators::tensor::BoolTensor",], Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor",], + Dtype.FP32x32: ["orion::operators::tensor::FP32x32Tensor",], } @@ -279,6 +281,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]: Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq",], Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",], Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",], + Dtype.FP32x32: ["orion::operators::tensor::FP32x32TensorPartialEq",], Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq",], Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq",], } @@ -290,6 +293,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]: Dtype.I8: ["orion::numbers::{IntegerTrait, i8}",], Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}",], Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}",], + Dtype.FP32x32: ["orion::numbers::{FixedTrait, FP32x32}",], Dtype.BOOL: [], Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}",], } \ No newline at end of file diff --git a/nodegen/node/reduce_log_sum.py b/nodegen/node/reduce_log_sum.py index 259081f5a..9dc8ad4df 100644 --- a/nodegen/node/reduce_log_sum.py +++ b/nodegen/node/reduce_log_sum.py @@ -1,7 +1,7 @@ import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl -import numpy as np + class Reduce_log_sum(RunAll): diff --git a/nodegen/node/reduce_log_sum_exp.py b/nodegen/node/reduce_log_sum_exp.py index 6f405f992..e4da8b1e0 100644 --- a/nodegen/node/reduce_log_sum_exp.py +++ b/nodegen/node/reduce_log_sum_exp.py @@ -1,144 +1,62 @@ -# import numpy as np -# from nodegen.node import RunAll -# from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp + 
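+# The expected magnitudes below follow to_fp(..., FixedImpl.FP32x32) from
+# helpers.py above, i.e. mag = value * 2**32. For example the first
+# do_not_keepdims entry is log(e**1 + e**2) ~ 2.31326, so its magnitude is
+# ~2.31326 * 2**32 ~ 9935383294.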
+class Reduce_log_sum_exp(RunAll): + @staticmethod + def reduce_log_sum_exp_fp32x32(): + def reduce_log_sum_exp_export_do_not_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = False + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_do_not_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, false)", name) + + def reduce_log_sum_exp_export_keepdims(): + shape = [3, 2, 2] + axes = np.array([2], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(2, true)", name) + + def reduce_log_sum_exp_axis_0(): + shape = [3, 2, 2] + axes = np.array([0], dtype=np.int64) + keepdims = True + x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) + y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64) + + x = Tensor(Dtype.FP32x32, x.shape, to_fp( + x.flatten(), FixedImpl.FP32x32)) + y = Tensor(Dtype.FP32x32, y.shape, to_fp( + y.flatten(), FixedImpl.FP32x32)) + + name = "reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims" + make_test( + [x], y, "input_0.reduce_log_sum_exp(0, true)", name) -# class Reduce_log_sum_exp(RunAll): -# @staticmethod -# def reduce_log_sum_exp_fp64x64(): -# shape = [3, 2, 2] -# axes = np.array([2], dtype=np.uint32) -# keepdims = False - -# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) - -# x = Tensor(Dtype.FP64x64, x.shape, to_fp( -# x.flatten(), FixedImpl.FP64x64)) -# y = Tensor(Dtype.FP64x64, y.shape, to_fp( -# y.flatten(), FixedImpl.FP64x64)) - -# name = "reduce_log_sum_exp_fp64x64_export_do_not_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(2, false)", name) - - - - - - -# def reduce_log_sum_exp_fp16x16(): -# def reduce_log_sum_exp_export_do_not_keepdims(): -# shape = [3, 2, 2] -# axes = np.array([2], dtype=np.uint32) -# keepdims = False -# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) - -# x = Tensor(Dtype.FP16x16, x.shape, to_fp( -# x.flatten(), FixedImpl.FP16x16)) -# y = Tensor(Dtype.FP8x23, y.shape, to_fp( -# y.flatten(), FixedImpl.FP16x16)) - -# name = "reduce_log_sum_exp_fp8x23_export_do_not_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(2, false)", name) - -# def reduce_log_sum_exp_export_keepdims(): -# shape = [3, 2, 2] -# axes = np.array([2], dtype=np.uint32) -# keepdims = True -# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - -# x = Tensor(Dtype.FP8x23, x.shape, to_fp( -# x.flatten(), FixedImpl.FP8x23)) -# y = Tensor(Dtype.FP8x23, y.shape, to_fp( -# y.flatten(), FixedImpl.FP8x23)) - -# name = "reduce_log_sum_exp_fp8x23_export_keepdims" -# make_test( -# 
[x], y, "input_0.reduce_log_sum_exp(2, true)", name) - -# def reduce_log_sum_exp_axis_0(): -# shape = [3, 2, 2] -# axes = np.array([0], dtype=np.uint32) -# keepdims = True -# x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - -# x = Tensor(Dtype.FP8x23, x.shape, to_fp( -# x.flatten(), FixedImpl.FP8x23)) -# y = Tensor(Dtype.FP8x23, y.shape, to_fp( -# y.flatten(), FixedImpl.FP8x23)) - -# name = "reduce_log_sum_exp_fp8x23_export_negative_axes_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(0, true)", name) - -# reduce_log_sum_exp_export_do_not_keepdims() -# reduce_log_sum_exp_export_keepdims() -# reduce_log_sum_exp_axis_0() - -# @staticmethod -# def reduce_log_sum_exp_fp16x16(): -# def reduce_log_sum_exp_export_do_not_keepdims(): -# shape = [3, 2, 2] -# axes = np.array([2], dtype=np.uint32) -# keepdims = False -# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.uint32) - -# x = Tensor(Dtype.FP16x16, x.shape, to_fp( -# x.flatten(), FixedImpl.FP16x16)) -# y = Tensor(Dtype.FP16x16, y.shape, to_fp( -# y.flatten(), FixedImpl.FP16x16)) - -# name = "reduce_log_sum_exp_fp16x16_export_do_not_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(2, false)", name) - -# def reduce_log_sum_exp_export_keepdims(): -# shape = [3, 2, 2] -# axes = np.array([2], dtype=np.uint32) -# keepdims = True -# x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.uint32), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - -# x = Tensor(Dtype.FP16x16, x.shape, to_fp( -# x.flatten(), FixedImpl.FP16x16)) -# y = Tensor(Dtype.FP16x16, y.shape, to_fp( -# y.flatten(), FixedImpl.FP16x16)) - -# name = "reduce_log_sum_exp_fp16x16_export_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(2, true)", name) - -# def reduce_log_sum_exp_axis_0(): -# shape = [3, 2, 2] -# axes = np.array([0], dtype=np.uint32) -# keepdims = True -# x = np.reshape(np.arange(1, np.prod(shape) + 1), shape) -# y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.uint32) - -# x = Tensor(Dtype.FP16x16, x.shape, to_fp( -# x.flatten(), FixedImpl.FP16x16)) -# y = Tensor(Dtype.FP16x16, y.shape, to_fp( -# y.flatten(), FixedImpl.FP16x16)) - -# name = "reduce_log_sum_exp_fp16x16_export_negative_axes_keepdims" -# make_test( -# [x], y, "input_0.reduce_log_sum_exp(0, true)", name) - -# reduce_log_sum_exp_export_do_not_keepdims() -# reduce_log_sum_exp_export_keepdims() -# reduce_log_sum_exp_axis_0() - - - - - - + reduce_log_sum_exp_export_do_not_keepdims() + reduce_log_sum_exp_export_keepdims() + reduce_log_sum_exp_axis_0() diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 540389c62..709a673f7 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -4588,56 +4588,41 @@ trait TensorTrait { /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; - /// use orion::operators::tensor::{TensorTrait, Tensor}; - /// use orion::operators::tensor::FP8x23Tensor; - /// use orion::numbers::{FixedTrait, FP8x23}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::FP32x32Tensor; + /// use orion::numbers::{FixedTrait, FP32x32}; /// - /// fn input_0() -> Tensor { - /// let mut shape = ArrayTrait::::new(); - /// shape.append(3); - /// shape.append(2); - /// shape.append(2); - /// - /// let 
mut data = ArrayTrait::new(); - /// data.append(FixedTrait::new_unscaled(1, false)); - /// data.append(FixedTrait::new_unscaled(2, false)); - /// data.append(FixedTrait::new_unscaled(3, false)); - /// data.append(FixedTrait::new_unscaled(4, false)); - /// data.append(FixedTrait::new_unscaled(5, false)); - /// data.append(FixedTrait::new_unscaled(6, false)); - /// data.append(FixedTrait::new_unscaled(7, false)); - /// data.append(FixedTrait::new_unscaled(8, false)); - /// data.append(FixedTrait::new_unscaled(9, false)); - /// data.append(FixedTrait::new_unscaled(10, false)); - /// data.append(FixedTrait::new_unscaled(11, false)); - /// data.append(FixedTrait::new_unscaled(12, false)); + /// fn reduce_log_sum_exp() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(3); + /// shape.append(2); + /// shape.append(2); /// - /// >>> ( - /// [[[1, 2] - /// [3, 4]] - /// - /// [[5, 6]] - /// [7, 8]] - /// - /// [[9, 10] - /// [11, 12]]] - /// ) - /// - /// - /// let tensor = TensorTrait::new(shape.span(), data.span()) + /// let mut data = ArrayTrait::new(); + /// data.append(FP32x32 { mag: 4294967296, sign: false }); + /// data.append(FP32x32 { mag: 8589934592, sign: false }); + /// data.append(FP32x32 { mag: 12884901888, sign: false }); + /// data.append(FP32x32 { mag: 17179869184, sign: false }); + /// data.append(FP32x32 { mag: 21474836480, sign: false }); + /// data.append(FP32x32 { mag: 25769803776, sign: false }); + /// data.append(FP32x32 { mag: 30064771072, sign: false }); + /// data.append(FP32x32 { mag: 34359738368, sign: false }); + /// data.append(FP32x32 { mag: 38654705664, sign: false }); + /// data.append(FP32x32 { mag: 42949672960, sign: false }); + /// data.append(FP32x32 { mag: 47244640256, sign: false }); + /// data.append(FP32x32 { mag: 51539607552, sign: false }); + /// TensorTrait::new(shape.span(), data.span()) + /// + /// let tensor = TensorTrait::::new(shape.span(), data.span()); + /// + /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); + /// + /// } + /// /// - /// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false); - /// } - /// - /// >>> - /// ( - /// [[2.31, 4.31] - /// [6.31, 8.31] - /// [10.31, 12.31]] - /// ) - /// + /// >>> [[9215828, 16323477, 20115004], [22716772, 24699744, 26302432]] /// ``` - + /// fn reduce_log_sum_exp(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ## tensor.erf /// diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index 6bc7da160..0272f019c 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -10,30 +10,30 @@ use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; /// Cf: TensorTrait::reduce_log_sum_exp docstring -fn reduce_log_sum_exp_wide< - T, - TMAG, - W, - WMAG, - impl TIntoW: Into, - impl WTryIntoT: TryInto, - impl WCopy: Copy, - impl WDrop: Drop, - impl TCopy: Copy, - impl TDrop: Drop, - impl TDiv: Div, - impl TTensor: TensorTrait, - impl WTensor: TensorTrait, - impl TFixed: FixedTrait, - impl WFixed: FixedTrait ->( - self: @Tensor, axis: usize, keepdims: bool -) -> Tensor { - let tensor_exp: Tensor = exp_upcast(*self); - let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims); +// fn reduce_log_sum_exp_wide< +// T, +// TMAG, +// W, +// WMAG, +// impl TIntoW: Into, +// impl WTryIntoT: TryInto, +// impl WCopy: Copy, +// impl WDrop: Drop, +// impl TCopy: Copy, 
+// impl TDrop: Drop, +// impl TDiv: Div, +// impl TTensor: TensorTrait, +// impl WTensor: TensorTrait, +// impl TFixed: FixedTrait, +// impl WFixed: FixedTrait +// >( +// self: @Tensor, axis: usize, keepdims: bool +// ) -> Tensor { +// let tensor_exp: Tensor = exp_upcast(*self); +// let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims); - return tensor_exp_log_sum; -} +// return tensor_exp_log_sum; +// } fn reduce_log_sum_exp< T, diff --git a/tests/nodes.cairo b/tests/nodes.cairo index b346347b5..8a579a1ec 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -869,4 +869,7 @@ mod compress_u32_3d_axis1; mod compress_u32_3d_axis2; mod compress_u32_3d_axis2_2; mod compress_u32_3d_axis3; +mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +mod reduce_log_sum_exp_fp32x32_export_keepdims; +mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo new file mode 100644 index 000000000..ae11dfc55 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP32x32Tensor; +use orion::operators::tensor::FP32x32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp32x32_export_do_not_keepdims() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.reduce_log_sum_exp(2, false); + + assert_eq(y, z); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo new file mode 100644 index 000000000..d6f2475e3 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 4294967296, sign: false }); + data.append(FP32x32 { mag: 8589934592, sign: false }); + data.append(FP32x32 { mag: 12884901888, sign: false }); + data.append(FP32x32 { mag: 17179869184, sign: false }); + data.append(FP32x32 { mag: 21474836480, sign: false }); + data.append(FP32x32 { mag: 25769803776, sign: false }); + data.append(FP32x32 { mag: 30064771072, sign: false }); + data.append(FP32x32 { mag: 34359738368, sign: false }); + data.append(FP32x32 { mag: 38654705664, sign: false }); + data.append(FP32x32 { mag: 42949672960, sign: false }); + data.append(FP32x32 { mag: 47244640256, sign: false }); + data.append(FP32x32 { mag: 51539607552, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo new file mode 100644 index 000000000..507762b24 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_do_not_keepdims/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; 
+use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 9935383294, sign: false }); + data.append(FP32x32 { mag: 18525317886, sign: false }); + data.append(FP32x32 { mag: 27115252478, sign: false }); + data.append(FP32x32 { mag: 35705187070, sign: false }); + data.append(FP32x32 { mag: 44295121662, sign: false }); + data.append(FP32x32 { mag: 52885056254, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo new file mode 100644 index 000000000..d9aed9c39 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP32x32Tensor; +use orion::operators::tensor::FP32x32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp32x32_export_keepdims() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.reduce_log_sum_exp(2, true); + + assert_eq(y, z); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo new file mode 100644 index 000000000..d6f2475e3 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 4294967296, sign: false }); + data.append(FP32x32 { mag: 8589934592, sign: false }); + data.append(FP32x32 { mag: 12884901888, sign: false }); + data.append(FP32x32 { mag: 17179869184, sign: false }); + data.append(FP32x32 { mag: 21474836480, sign: false }); + data.append(FP32x32 { mag: 25769803776, sign: false }); + data.append(FP32x32 { mag: 30064771072, sign: false }); + data.append(FP32x32 { mag: 34359738368, sign: false }); + data.append(FP32x32 { mag: 38654705664, sign: false }); + data.append(FP32x32 { mag: 42949672960, sign: false }); + data.append(FP32x32 { mag: 47244640256, sign: false }); + data.append(FP32x32 { mag: 51539607552, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo new file mode 100644 index 000000000..04adcc345 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_keepdims/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 9935383294, sign: false }); + 
data.append(FP32x32 { mag: 18525317886, sign: false }); + data.append(FP32x32 { mag: 27115252478, sign: false }); + data.append(FP32x32 { mag: 35705187070, sign: false }); + data.append(FP32x32 { mag: 44295121662, sign: false }); + data.append(FP32x32 { mag: 52885056254, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo new file mode 100644 index 000000000..1b75f4815 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP32x32Tensor; +use orion::operators::tensor::FP32x32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.reduce_log_sum_exp(0, true); + + assert_eq(y, z); +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo new file mode 100644 index 000000000..d6f2475e3 --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 4294967296, sign: false }); + data.append(FP32x32 { mag: 8589934592, sign: false }); + data.append(FP32x32 { mag: 12884901888, sign: false }); + data.append(FP32x32 { mag: 17179869184, sign: false }); + data.append(FP32x32 { mag: 21474836480, sign: false }); + data.append(FP32x32 { mag: 25769803776, sign: false }); + data.append(FP32x32 { mag: 30064771072, sign: false }); + data.append(FP32x32 { mag: 34359738368, sign: false }); + data.append(FP32x32 { mag: 38654705664, sign: false }); + data.append(FP32x32 { mag: 42949672960, sign: false }); + data.append(FP32x32 { mag: 47244640256, sign: false }); + data.append(FP32x32 { mag: 51539607552, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo new file mode 100644 index 000000000..75f47ee0f --- /dev/null +++ b/tests/nodes/reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims/output_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP32x32Tensor; +use orion::numbers::{FixedTrait, FP32x32}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP32x32 { mag: 38734073664, sign: false }); + data.append(FP32x32 { mag: 43029040960, sign: false }); + data.append(FP32x32 { mag: 47324008256, sign: false }); + data.append(FP32x32 { mag: 51618975552, 
sign: false }); + TensorTrait::new(shape.span(), data.span()) +} From 683e8b471fe63e742f194c89e10a325f1339965f Mon Sep 17 00:00:00 2001 From: Gaki <153402253+GakiBash@users.noreply.github.com> Date: Tue, 13 Feb 2024 18:52:25 +0100 Subject: [PATCH 15/40] Updated pathj --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cc2cf1ef2..0d07bf3e3 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ For a full list of all authors and contributors, see [the contributors page](htt This project is licensed under the **MIT license**. -See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE/README.md) for more information. +See [LICENSE](https://github.com/franalgaba/onnx-cairo/blob/main/LICENSE) for more information. ## Contributors ✨ From 56b1a727310a0e199206e383337e01aa54514d5c Mon Sep 17 00:00:00 2001 From: Gaki <153402253+GakiBash@users.noreply.github.com> Date: Tue, 13 Feb 2024 17:55:12 +0000 Subject: [PATCH 16/40] proofread --- docs/framework/operators/neural-network/README.md | 4 ++-- docs/framework/operators/neural-network/nn.conv.md | 2 +- .../operators/neural-network/nn.conv_transpose.md | 2 +- docs/framework/operators/tensor/tensor.min.md | 2 +- docs/framework/operators/tensor/tensor.qlinear_add.md | 2 +- docs/framework/operators/tensor/tensor.qlinear_matmul.md | 2 +- docs/framework/operators/tensor/tensor.qlinear_mul.md | 2 +- src/operators/nn/core.cairo | 8 ++++---- src/operators/tensor/core.cairo | 8 ++++---- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md index b24ad9e40..fc3bfb612 100644 --- a/docs/framework/operators/neural-network/README.md +++ b/docs/framework/operators/neural-network/README.md @@ -37,6 +37,6 @@ Orion supports currently these `NN` types. | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. | | [`nn.grid_sample`](nn.grid\_sample.md) | Computes the grid sample of the input tensor and input grid. | | [`nn.col2im`](nn.col2im.md) | Rearranges column blocks back into a multidimensional image | -| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weigth tensor. | -| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weigth tensor. | +| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weight tensor. | +| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weight tensor. | diff --git a/docs/framework/operators/neural-network/nn.conv.md b/docs/framework/operators/neural-network/nn.conv.md index 086737f0b..fd7d53010 100644 --- a/docs/framework/operators/neural-network/nn.conv.md +++ b/docs/framework/operators/neural-network/nn.conv.md @@ -15,7 +15,7 @@ ) -> Tensor ``` -The convolution operator consumes an input tensor and a filter (input weigth tensor), and computes the output. +The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. 
## Args diff --git a/docs/framework/operators/neural-network/nn.conv_transpose.md b/docs/framework/operators/neural-network/nn.conv_transpose.md index 29b2af6d2..83082fd94 100644 --- a/docs/framework/operators/neural-network/nn.conv_transpose.md +++ b/docs/framework/operators/neural-network/nn.conv_transpose.md @@ -16,7 +16,7 @@ ) -> Tensor ``` -The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output. +The convolution transpose operator consumes an input tensor and a input weight tensor, and computes the output. ## Args diff --git a/docs/framework/operators/tensor/tensor.min.md b/docs/framework/operators/tensor/tensor.min.md index 92bc2d150..12deae2e3 100644 --- a/docs/framework/operators/tensor/tensor.min.md +++ b/docs/framework/operators/tensor/tensor.min.md @@ -4,7 +4,7 @@ fn min(tensors: Span>) -> Tensor; ``` -Returns the element-wise minumum values from a list of input tensors +Returns the element-wise minimum values from a list of input tensors The input tensors must have either: * Exactly the same shape * The same number of dimensions and the length of each dimension is either a common length or 1. diff --git a/docs/framework/operators/tensor/tensor.qlinear_add.md b/docs/framework/operators/tensor/tensor.qlinear_add.md index b89987c21..bb997d9c3 100644 --- a/docs/framework/operators/tensor/tensor.qlinear_add.md +++ b/docs/framework/operators/tensor/tensor.qlinear_add.md @@ -8,7 +8,7 @@ Performs the sum of quantized Tensors It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). -It perfoms the addition of the two vectors once dequantized, then return the quantization of the result of the addition. +It performs the addition of the two vectors once dequantized, then return the quantization of the result of the addition. The broadcasting is supported Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. diff --git a/docs/framework/operators/tensor/tensor.qlinear_matmul.md b/docs/framework/operators/tensor/tensor.qlinear_matmul.md index b5928a8bb..eb52f4c03 100644 --- a/docs/framework/operators/tensor/tensor.qlinear_matmul.md +++ b/docs/framework/operators/tensor/tensor.qlinear_matmul.md @@ -8,7 +8,7 @@ Multiplies quantized Tensors It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). -It perfoms the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. +It performs the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. Then return the quantization of the result of the multiplication. Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. 
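The `qlinear_add` and `qlinear_matmul` docstrings above share one pipeline: dequantize both operands, compute in the dequantized domain, then requantize with the stated formula `y = saturate((x / y_scale) + y_zero_point)`. The NumPy sketch below models that pipeline only; it is not Orion's fixed-point implementation, and the i8 saturation bounds and `np.round` rounding mode are assumptions.

```python
import numpy as np

def dequantize(q, scale, zero_point):
    # x = (q - zero_point) * scale
    return (q.astype(np.float64) - zero_point) * scale

def quantize(x, scale, zero_point, lo=-128, hi=127):
    # y = saturate((x / y_scale) + y_zero_point), per the docstrings above
    return np.clip(np.round(x / scale) + zero_point, lo, hi).astype(np.int8)

def qlinear_matmul(a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp):
    # multiply the operands once dequantized, then requantize the product
    deq = dequantize(a, a_scale, a_zp) @ dequantize(b, b_scale, b_zp)
    return quantize(deq, y_scale, y_zp)
```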
diff --git a/docs/framework/operators/tensor/tensor.qlinear_mul.md b/docs/framework/operators/tensor/tensor.qlinear_mul.md index e1877a137..aeedd3365 100644 --- a/docs/framework/operators/tensor/tensor.qlinear_mul.md +++ b/docs/framework/operators/tensor/tensor.qlinear_mul.md @@ -8,7 +8,7 @@ Performs the element-wise multiplication of quantized Tensors It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. The quantization formula is y = saturate((x / y_scale) + y_zero_point). -It perfoms the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication. +It performs the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication. The broadcasting is supported Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 93f9242c0..35d318b28 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -16,8 +16,8 @@ use orion::operators::tensor::core::Tensor; /// gemm - Performs General Matrix multiplication. /// grid_sample - Computes the grid sample of the input tensor and input grid. /// col2im - Rearranges column blocks back into a multidimensional image -/// conv_transpose - Performs the convolution transpose of the input data tensor and weigth tensor. -/// conv - Performs the convolution of the input data tensor and weigth tensor. +/// conv_transpose - Performs the convolution transpose of the input data tensor and weight tensor. +/// conv - Performs the convolution of the input data tensor and weight tensor. trait NNTrait { /// # NNTrait::relu /// @@ -834,7 +834,7 @@ trait NNTrait { /// ) -> Tensor /// ``` /// - /// The convolution operator consumes an input tensor and a filter (input weigth tensor), and computes the output. + /// The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. /// /// ## Args /// @@ -971,7 +971,7 @@ trait NNTrait { /// ) -> Tensor /// ``` /// - /// The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output. + /// The convolution transpose operator consumes an input tensor and a input weight tensor, and computes the output. /// /// ## Args /// diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 23018044d..ad79fce9d 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -275,7 +275,7 @@ trait TensorTrait { /// fn min(tensors: Span>) -> Tensor; /// ``` /// - /// Returns the element-wise minumum values from a list of input tensors + /// Returns the element-wise minimum values from a list of input tensors /// The input tensors must have either: /// * Exactly the same shape /// * The same number of dimensions and the length of each dimension is either a common length or 1. @@ -2578,7 +2578,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). 
- /// It perfoms the addition of the two vectors once dequantized, then return the quantization of the result of the addition. + /// It performs the addition of the two vectors once dequantized, then return the quantization of the result of the addition. /// The broadcasting is supported /// Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. @@ -2676,7 +2676,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). - /// It perfoms the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication. + /// It performs the element-wise multiplication of the two vectors once dequantized, then return the quantization of the result of the multiplication. /// The broadcasting is supported /// Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. @@ -2783,7 +2783,7 @@ trait TensorTrait { /// /// It consumes two quantized input tensors, their scales and zero points, scale and zero point of output, and computes the quantized output. /// The quantization formula is y = saturate((x / y_scale) + y_zero_point). - /// It perfoms the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. + /// It performs the multiplication of the two vectors once dequantized. If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes. /// Then return the quantization of the result of the multiplication. /// Scale and zero point must have same shape and the same type. They must be either scalar (per tensor) or N-D tensor (per row for 'a' and per column for 'b'). /// Scalar refers to per tensor quantization whereas N-D refers to per row or per column quantization. 
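The core.cairo docstring just above notes that for N-D inputs `qlinear_matmul` treats each argument as a stack of matrices residing in the last two indexes. NumPy's `@` operator broadcasts over the leading axes the same way, so the sketch from the previous note covers the stacked case unchanged; the scales and zero points here are made-up values for a shape check only.

```python
import numpy as np  # dequantize/quantize/qlinear_matmul come from the sketch above

a = np.random.randint(-20, 20, size=(3, 2, 4), dtype=np.int8)  # stack of 3 matrices
b = np.random.randint(-20, 20, size=(3, 4, 5), dtype=np.int8)

y = qlinear_matmul(a, 0.02, 0, b, 0.05, 0, y_scale=0.1, y_zp=0)
print(y.shape)  # (3, 2, 5): one 2x4 @ 4x5 product per entry in the stack
```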
From 8fddc9579a022139cde06cccedc293584b0e1999 Mon Sep 17 00:00:00 2001 From: TAdev0 Date: Fri, 16 Feb 2024 20:44:19 +0100 Subject: [PATCH 17/40] draft_refactor_loops_and_improve_gas_consumption --- src/numbers.cairo | 239 +++++---- src/numbers/complex_number/complex64.cairo | 46 +- .../implementations/fp16x16/core.cairo | 6 - .../implementations/fp16x16/helpers.cairo | 1 - .../implementations/fp8x23/core.cairo | 140 +++-- .../implementations/fp8x23/helpers.cairo | 1 - .../implementations/fp8x23/math/comp.cairo | 10 +- .../implementations/fp8x23/math/core.cairo | 100 ++-- .../implementations/fp8x23/math/erf.cairo | 2 +- .../implementations/fp8x23/math/hyp.cairo | 22 +- .../implementations/fp8x23/math/lut.cairo | 11 +- .../implementations/fp8x23/math/trig.cairo | 36 +- .../implementations/fp8x23wide/core.cairo | 136 +++-- .../implementations/fp8x23wide/helpers.cairo | 1 - .../fp8x23wide/math/comp.cairo | 32 +- .../fp8x23wide/math/core.cairo | 134 +++-- .../implementations/fp8x23wide/math/erf.cairo | 2 +- .../implementations/fp8x23wide/math/hyp.cairo | 23 +- .../implementations/fp8x23wide/math/lut.cairo | 11 +- .../fp8x23wide/math/trig.cairo | 46 +- src/numbers/fixed_point/utils.cairo | 2 +- src/operators/matrix.cairo | 145 ++--- .../tensor/implementations/tensor_bool.cairo | 47 +- .../implementations/tensor_complex64.cairo | 46 +- .../implementations/tensor_fp16x16.cairo | 61 +-- .../implementations/tensor_fp16x16wide.cairo | 60 +-- .../implementations/tensor_fp32x32.cairo | 55 +- .../implementations/tensor_fp64x64.cairo | 61 +-- .../implementations/tensor_fp8x23.cairo | 68 +-- .../implementations/tensor_fp8x23wide.cairo | 52 +- .../tensor/implementations/tensor_i32.cairo | 72 +-- .../tensor/implementations/tensor_i8.cairo | 32 +- .../tensor/implementations/tensor_u32.cairo | 58 +- src/operators/tensor/linalg/matmul.cairo | 41 +- src/operators/tensor/linalg/transpose.cairo | 43 +- src/operators/tensor/linalg/trilu.cairo | 110 ++-- .../tensor/fixed_point/fp16x16.cairo | 502 ++++++++---------- .../tensor/fixed_point/fp8x23.cairo | 500 ++++++++--------- src/test_helper/tensor/i32.cairo | 400 +++++--------- src/test_helper/tensor/i8.cairo | 397 ++++---------- src/test_helper/tensor/u32.cairo | 193 ++----- src/utils.cairo | 10 +- 42 files changed, 1613 insertions(+), 2341 deletions(-) diff --git a/src/numbers.cairo b/src/numbers.cairo index 156fc5bf5..ddd95fa10 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -1432,9 +1432,10 @@ impl FP32x32Number of NumberTrait { impl I8Number of NumberTrait { fn new(mag: i8, sign: bool) -> i8 { - if sign{ + if sign { return -mag; } + mag } @@ -1559,7 +1560,7 @@ impl I8Number of NumberTrait { fn abs(self: i8) -> i8 { if self >= 0 { - return self; + self } else { self * -1_i8 } @@ -1579,7 +1580,7 @@ impl I8Number of NumberTrait { fn min(self: i8, other: i8) -> i8 { if self < other { - return self; + self } else { other } @@ -1587,7 +1588,7 @@ impl I8Number of NumberTrait { fn max(self: i8, other: i8) -> i8 { if self > other { - return self; + self } else { other } @@ -1603,43 +1604,43 @@ impl I8Number of NumberTrait { fn xor(lhs: i8, rhs: i8) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i8, rhs: i8) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i8) -> i8 { if self == 0 { - return 0_i8; + 0_i8 } else if self > 0 { - return 1_i8; + 1_i8 } else { -1_i8 } } fn and(lhs: i8, rhs: i8) -> bool 
{ - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i8, x: i8, y: i8) -> i8 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -1656,7 +1657,7 @@ impl I8Number of NumberTrait { } fn is_inf(self: i8) -> bool { - (self == 127 || self == -127) + self == 127 || self == -127 } fn is_pos_inf(self: i8) -> bool { @@ -1691,8 +1692,10 @@ impl I8Number of NumberTrait { impl I8Div of Div { fn div(lhs: i8, rhs: i8) -> i8 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -1700,6 +1703,7 @@ impl I8Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -1708,6 +1712,7 @@ impl I8Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -1727,11 +1732,14 @@ impl I8IntoFP8x23 of Into { fn into(self: i8) -> FP8x23 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1740,11 +1748,14 @@ impl I8IntoFP16x16 of Into { fn into(self: i8) -> FP16x16 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1753,11 +1764,14 @@ impl I8IntoFP64x64 of Into { fn into(self: i8) -> FP64x64 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1766,20 +1780,24 @@ impl I8IntoFP32x32 of Into { fn into(self: i8) -> FP32x32 { let number_sign: bool = self < 0; let mut self_positive: i8 = self; + if number_sign { self_positive = self_positive * -1_i8 } + let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } impl I16Number of NumberTrait { fn new(mag: i16, sign: bool) -> i16 { - if sign{ + if sign { return -mag; } + mag } @@ -1904,7 +1922,7 @@ impl I16Number of NumberTrait { fn abs(self: i16) -> i16 { if self >= 0 { - return self; + self } else { self * -1_i16 } @@ -1924,7 +1942,7 @@ impl I16Number of NumberTrait { fn min(self: i16, other: i16) -> i16 { if self < other { - return self; + self } else { other } @@ -1932,7 +1950,7 @@ impl I16Number of NumberTrait { fn max(self: i16, other: i16) -> i16 { if self > other { - return self; + self } else { other } @@ -1948,43 +1966,43 @@ impl I16Number of NumberTrait { fn xor(lhs: i16, rhs: i16) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i16, rhs: i16) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false 
} else { - return true; + true } } fn sign(self: i16) -> i16 { if self == 0 { - return 0_i16; + 0_i16 } else if self > 0 { - return 1_i16; + 1_i16 } else { -1_i16 } } fn and(lhs: i16, rhs: i16) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i16, x: i16, y: i16) -> i16 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2001,7 +2019,7 @@ impl I16Number of NumberTrait { } fn is_inf(self: i16) -> bool { - (self == 32767 || self == -32767) + self == 32767 || self == -32767 } fn is_pos_inf(self: i16) -> bool { @@ -2036,8 +2054,10 @@ impl I16Number of NumberTrait { impl I16Div of Div { fn div(lhs: i16, rhs: i16) -> i16 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2045,6 +2065,7 @@ impl I16Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2053,6 +2074,7 @@ impl I16Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2070,9 +2092,10 @@ impl I16DivEq of DivEq { impl I32Number of NumberTrait { fn new(mag: i32, sign: bool) -> i32 { - if sign{ + if sign { return -mag; } + mag } @@ -2197,7 +2220,7 @@ impl I32Number of NumberTrait { fn abs(self: i32) -> i32 { if self >= 0 { - return self; + self } else { self * -1_i32 } @@ -2217,7 +2240,7 @@ impl I32Number of NumberTrait { fn min(self: i32, other: i32) -> i32 { if self < other { - return self; + self } else { other } @@ -2225,7 +2248,7 @@ impl I32Number of NumberTrait { fn max(self: i32, other: i32) -> i32 { if self > other { - return self; + self } else { other } @@ -2241,43 +2264,43 @@ impl I32Number of NumberTrait { fn xor(lhs: i32, rhs: i32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i32, rhs: i32) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i32) -> i32 { if self == 0 { - return 0_i32; + 0_i32 } else if self > 0 { - return 1_i32; + 1_i32 } else { -1_i32 } } fn and(lhs: i32, rhs: i32) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i32, x: i32, y: i32) -> i32 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2294,7 +2317,7 @@ impl I32Number of NumberTrait { } fn is_inf(self: i32) -> bool { - (self == 2147483647 || self == -2147483647) + self == 2147483647 || self == -2147483647 } fn is_pos_inf(self: i32) -> bool { @@ -2329,8 +2352,10 @@ impl I32Number of NumberTrait { impl I32Div of Div { fn div(lhs: i32, rhs: i32) -> i32 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2338,6 +2363,7 @@ impl I32Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2346,6 +2372,7 @@ impl I32Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = 
result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2365,20 +2392,24 @@ impl I32IntoU32 of Into { fn into(self: i32) -> u32 { let number_sign: bool = self < 0; let mut self_positive: i32 = self; + if number_sign { self_positive = self_positive * -1_i32 } + let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); + number_u32 } } impl I64Number of NumberTrait { fn new(mag: i64, sign: bool) -> i64 { - if sign{ + if sign { return -mag; } + mag } @@ -2503,7 +2534,7 @@ impl I64Number of NumberTrait { fn abs(self: i64) -> i64 { if self >= 0 { - return self; + self } else { self * -1_i64 } @@ -2523,7 +2554,7 @@ impl I64Number of NumberTrait { fn min(self: i64, other: i64) -> i64 { if self < other { - return self; + self } else { other } @@ -2531,7 +2562,7 @@ impl I64Number of NumberTrait { fn max(self: i64, other: i64) -> i64 { if self > other { - return self; + self } else { other } @@ -2547,43 +2578,43 @@ impl I64Number of NumberTrait { fn xor(lhs: i64, rhs: i64) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: i64, rhs: i64) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i64) -> i64 { if self == 0 { - return 0_i64; + 0_i64 } else if self > 0 { - return 1_i64; + 1_i64 } else { -1_i64 } } fn and(lhs: i64, rhs: i64) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i64, x: i64, y: i64) -> i64 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2600,7 +2631,7 @@ impl I64Number of NumberTrait { } fn is_inf(self: i64) -> bool { - (self == 9223372036854775807 || self == -9223372036854775807) + self == 9223372036854775807 || self == -9223372036854775807 } fn is_pos_inf(self: i64) -> bool { @@ -2635,8 +2666,10 @@ impl I64Number of NumberTrait { impl I64Div of Div { fn div(lhs: i64, rhs: i64) -> i64 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2644,6 +2677,7 @@ impl I64Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2652,6 +2686,7 @@ impl I64Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); + if lhs * rhs < 0 { signed_int_result * -1 } else { @@ -2669,9 +2704,10 @@ impl I64DivEq of DivEq { impl I128Number of NumberTrait { fn new(mag: i128, sign: bool) -> i128 { - if sign{ + if sign { return -mag; } + mag } @@ -2796,7 +2832,7 @@ impl I128Number of NumberTrait { fn abs(self: i128) -> i128 { if self >= 0 { - return self; + self } else { self * -1_i128 } @@ -2816,7 +2852,7 @@ impl I128Number of NumberTrait { fn min(self: i128, other: i128) -> i128 { if self < other { - return self; + self } else { other } @@ -2824,7 +2860,7 @@ impl I128Number of NumberTrait { fn max(self: i128, other: i128) -> i128 { if self > other { - return self; + self } else { other } @@ -2840,43 +2876,43 @@ impl I128Number of NumberTrait { fn xor(lhs: i128, rhs: i128) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - 
return false; + false } } fn or(lhs: i128, rhs: i128) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } fn sign(self: i128) -> i128 { if self == 0 { - return 0_i128; + 0_i128 } else if self > 0 { - return 1_i128; + 1_i128 } else { -1_i128 } } fn and(lhs: i128, rhs: i128) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: i128, x: i128, y: i128) -> i128 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -2893,8 +2929,8 @@ impl I128Number of NumberTrait { } fn is_inf(self: i128) -> bool { - (self == 170141183460469231731687303715884105727 - || self == -170141183460469231731687303715884105727) + self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727 } fn is_pos_inf(self: i128) -> bool { @@ -2929,8 +2965,10 @@ impl I128Number of NumberTrait { impl I128Div of Div { fn div(lhs: i128, rhs: i128) -> i128 { assert(rhs != 0, 'divisor cannot be 0'); + let mut lhs_positive = lhs; let mut rhs_positive = rhs; + // making sure everything is positive if lhs < 0 { lhs_positive = lhs * -1; @@ -2938,6 +2976,7 @@ impl I128Div of Div { if rhs < 0 { rhs_positive = rhs * -1; } + //felt252 plays role of a bridge for type casting let lhs_felt: felt252 = lhs_positive.into(); let rhs_felt: felt252 = rhs_positive.into(); @@ -2946,6 +2985,7 @@ impl I128Div of Div { let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); + // assigning the sign and returning if lhs * rhs < 0 { signed_int_result * -1 @@ -3105,7 +3145,7 @@ impl u32Number of NumberTrait { fn min(self: u32, other: u32) -> u32 { if self < other { - return self; + self } else { other } @@ -3113,7 +3153,7 @@ impl u32Number of NumberTrait { fn max(self: u32, other: u32) -> u32 { if self > other { - return self; + self } else { other } @@ -3129,17 +3169,17 @@ impl u32Number of NumberTrait { fn xor(lhs: u32, rhs: u32) -> bool { if (lhs == 0 || rhs == 0) && lhs != rhs { - return true; + true } else { - return false; + false } } fn or(lhs: u32, rhs: u32) -> bool { - if (lhs == 0 && rhs == 0) { - return false; + if lhs == 0 && rhs == 0 { + false } else { - return true; + true } } @@ -3148,18 +3188,18 @@ impl u32Number of NumberTrait { } fn and(lhs: u32, rhs: u32) -> bool { - if (lhs == 0 || rhs == 0) { - return false; + if lhs == 0 || rhs == 0 { + false } else { - return true; + true } } fn where(self: u32, x: u32, y: u32) -> u32 { if self == 0 { - return y; + y } else { - return x; + x } } @@ -3324,6 +3364,7 @@ impl Complex64Number of NumberTrait { if self == Complex64Impl::zero() { return true; } + false } @@ -3343,6 +3384,7 @@ impl Complex64Number of NumberTrait { if self == Complex64Impl::one() { return true; } + false } @@ -3447,6 +3489,7 @@ impl U32IntoI32 of Into { fn into(self: u32) -> i32 { let number_felt: felt252 = self.into(); let number_i32: i32 = number_felt.try_into().unwrap(); + number_i32 } } diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo index 20fb57f88..9edcb8d1a 100644 --- a/src/numbers/complex_number/complex64.cairo +++ b/src/numbers/complex_number/complex64.cairo @@ -15,7 +15,6 @@ struct complex64 { } // CONSTANTS for FP64x64 - const PI: u128 = 57952155664616982739; const HALF_PI: u128 = 28976077832308491370; const TWO: u128 = 36893488147419103232; @@ -40,15 +39,16 @@ impl 
Complex64Impl of ComplexTrait { } fn zero() -> complex64 { - return complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() }; + complex64 { real: FixedTrait::ZERO(), img: FP64x64Impl::ZERO() } } fn one() -> complex64 { - return complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() }; + complex64 { real: FP64x64Impl::ONE(), img: FP64x64Impl::ZERO() } } fn mag(self: complex64) -> FP64x64 { let two = FP64x64Impl::new(TWO, false); + (self.real.pow(two) + self.img.pow(two)).sqrt() } @@ -59,15 +59,16 @@ impl Complex64Impl of ComplexTrait { fn exp(self: complex64) -> complex64 { let real = self.real.exp() * self.img.cos(); let img = self.real.exp() * self.img.sin(); + complex64 { real, img } } fn exp2(self: complex64) -> complex64 { let two = complex64 { real: FP64x64Impl::new(TWO, false), img: FP64x64Impl::ZERO() }; + two.pow(self) } - fn sqrt(self: complex64) -> complex64 { let x = self.real; let y = self.img; @@ -78,26 +79,29 @@ impl Complex64Impl of ComplexTrait { } else { (((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt() }; - let img = FP64x64Impl::new(img.mag, y.sign); + complex64 { real, img } } fn ln(self: complex64) -> complex64 { let real = self.mag().ln(); let img = self.arg(); + complex64 { real, img } } fn log2(self: complex64) -> complex64 { let ln_2 = FP64x64Impl::new(12786309186476892720, false); let ln = self.ln(); + complex64 { real: (ln.real / ln_2), img: (ln.img / ln_2) } } fn log10(self: complex64) -> complex64 { let ln_10 = FP64x64Impl::new(42475197399893398429, false); let ln = self.ln(); + complex64 { real: (ln.real / ln_10), img: (ln.img / ln_10) } } @@ -129,6 +133,7 @@ impl Complex64Impl of ComplexTrait { let B = b.real * self.arg() + b.img * self.mag().ln(); let real = A * B.cos(); let img = A * B.sin(); + complex64 { real, img } } @@ -136,17 +141,18 @@ impl Complex64Impl of ComplexTrait { fn cos(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::cos(a) * FP64x64Impl::cosh(b), img: -FP64x64Impl::sin(a) * FP64x64Impl::sinh(b) } } - //sin(z) = sin(a+bi) = sin(a)cosh(b)+icos(a)sinh(b) fn sin(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::sin(a) * FP64x64Impl::cosh(b), img: FP64x64Impl::cos(a) * FP64x64Impl::sinh(b) @@ -159,6 +165,7 @@ impl Complex64Impl of ComplexTrait { let a = self.real; let b = self.img; let den = FP64x64Impl::cosh(two * b) + FP64x64Impl::cos(two * a); + complex64 { real: FP64x64Impl::sin(two * a) / den, img: FP64x64Impl::sinh(two * b) / den } } @@ -184,7 +191,6 @@ impl Complex64Impl of ComplexTrait { asin } - //atan(z) = 1/2 * i[ln (1 - iz) - ln(1 + iz)] fn atan(self: complex64) -> complex64 { let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO()); @@ -198,7 +204,6 @@ impl Complex64Impl of ComplexTrait { atan } - //acosh(z) = ln (z + sqrt(z + 1) * sqrt(z - 1)) fn acosh(self: complex64) -> complex64 { let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO()); @@ -218,7 +223,6 @@ impl Complex64Impl of ComplexTrait { asinh } - //atanh(z) = 1/2 * [ln (1 + z) - ln(1 - z)] fn atanh(self: complex64) -> complex64 { let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO()); @@ -232,6 +236,7 @@ impl Complex64Impl of ComplexTrait { fn cosh(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::cosh(a) * FP64x64Impl::cos(b), img: FP64x64Impl::sinh(a) * FP64x64Impl::sin(b) @@ -242,6 +247,7 @@ impl Complex64Impl of ComplexTrait { fn 
sinh(self: complex64) -> complex64 { let a = self.real; let b = self.img; + complex64 { real: FP64x64Impl::sinh(a) * FP64x64Impl::cos(b), img: FP64x64Impl::cosh(a) * FP64x64Impl::sin(b) @@ -254,6 +260,7 @@ impl Complex64Impl of ComplexTrait { let a = self.real; let b = self.img; let den = FP64x64Impl::cosh(two * a) + FP64x64Impl::cos(two * b); + complex64 { real: FP64x64Impl::sinh(two * a) / den, img: FP64x64Impl::sin(two * b) / den } } @@ -261,12 +268,14 @@ impl Complex64Impl of ComplexTrait { fn to_polar(self: complex64) -> (FP64x64, FP64x64) { let mag = self.mag(); let arg = self.arg(); - return (mag, arg); + + (mag, arg) } fn from_polar(mag: FP64x64, arg: FP64x64) -> complex64 { let real = mag * arg.cos(); let img = mag * arg.sin(); + complex64 { real, img } } @@ -277,6 +286,7 @@ impl Complex64Impl of ComplexTrait { let real = x / (x.pow(two) + y.pow(two)); let img = -y / (x.pow(two) + y.pow(two)); + complex64 { real, img } } } @@ -361,7 +371,6 @@ impl Complex64DivEq of DivEq { } } - // Implements the PartialEq trait for complex64. impl Complex64PartialEq of PartialEq { fn eq(lhs: @complex64, rhs: @complex64) -> bool { @@ -394,7 +403,8 @@ impl Complex64Neg of Neg { fn complex64_add(a: complex64, b: complex64) -> complex64 { let real = a.real + b.real; let img = a.img + b.img; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Subtracts complex64 complex numbers. @@ -409,7 +419,8 @@ fn complex64_add(a: complex64, b: complex64) -> complex64 { fn complex64_sub(a: complex64, b: complex64) -> complex64 { let real = a.real - b.real; let img = a.img - b.img; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Multiplies two complex64 integers. @@ -427,7 +438,8 @@ fn complex64_sub(a: complex64, b: complex64) -> complex64 { fn complex64_mul(a: complex64, b: complex64) -> complex64 { let real = a.real * b.real - a.img * b.img; let img = a.real * b.img + a.img * b.real; - return ComplexTrait::new(real, img); + + ComplexTrait::new(real, img) } // Divides the first complex64 by the second complex64. @@ -452,7 +464,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool { return true; } - return false; + false } // Compares two complex64 complex numbers for inequality. @@ -463,7 +475,7 @@ fn complex64_eq(a: complex64, b: complex64) -> bool { // * `bool` - `true` if the two complex numbers are not equal, `false` otherwise. fn complex64_ne(a: complex64, b: complex64) -> bool { // The result is the inverse of the equal function. - return !complex64_eq(a, b); + !complex64_eq(a, b) } // Negates the given complex64 complex number. @@ -473,5 +485,5 @@ fn complex64_ne(a: complex64, b: complex64) -> bool { // * `complex64` - The negation of `x`. fn complex64_neg(x: complex64) -> complex64 { // The negation of an complex number is obtained by negating its real part and its imaginary part. 
- return ComplexTrait::new(-x.real, -x.img); + ComplexTrait::new(-x.real, -x.img) } diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index d39820ce8..421339dae 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::fixed_point::implementations::fp16x16::math::{ core as core_math, trig, hyp, erf @@ -18,13 +14,11 @@ struct FP16x16 { } // CONSTANTS - const TWO: u32 = 131072; // 2 ** 17 const ONE: u32 = 65536; // 2 ** 16 const HALF: u32 = 32768; // 2 ** 15 const MAX: u32 = 2147483648; // 2 ** 31 - impl FP16x16Impl of FixedTrait { fn ZERO() -> FP16x16 { return FP16x16 { mag: 0, sign: false }; diff --git a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo index 0cd5a8f0f..d18fc4108 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16::core::{ HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Sub, FP16x16Div, FixedTrait, FP16x16Print diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index 20b0788f3..7e44554dd 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::math::{core as core_math, trig, hyp, erf}; use orion::numbers::fixed_point::utils; @@ -16,7 +12,6 @@ struct FP8x23 { } // CONSTANTS - const TWO: u32 = 16777216; // 2 ** 24 const ONE: u32 = 8388608; // 2 ** 23 const HALF: u32 = 4194304; // 2 ** 22 @@ -25,169 +20,170 @@ const MAX: u32 = 2147483648; // 2 ** 31 impl FP8x23Impl of FixedTrait { fn ZERO() -> FP8x23 { - return FP8x23 { mag: 0, sign: false }; + FP8x23 { mag: 0, sign: false } } fn HALF() -> FP8x23 { - return FP8x23 { mag: HALF, sign: false }; + FP8x23 { mag: HALF, sign: false } } fn ONE() -> FP8x23 { - return FP8x23 { mag: ONE, sign: false }; + FP8x23 { mag: ONE, sign: false } } fn MAX() -> FP8x23 { - return FP8x23 { mag: MAX, sign: false }; + FP8x23 { mag: MAX, sign: false } } fn new(mag: u32, sign: bool) -> FP8x23 { - return FP8x23 { mag: mag, sign: sign }; + FP8x23 { mag: mag, sign: sign } } fn new_unscaled(mag: u32, sign: bool) -> FP8x23 { - return FP8x23 { mag: mag * ONE, sign: sign }; + FP8x23 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP8x23 { let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP8x23) -> FP8x23 { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP8x23) -> FP8x23 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP8x23) -> FP8x23 { - return 
trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP8x23) -> FP8x23 { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP8x23) -> FP8x23 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP8x23) -> FP8x23 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP8x23) -> FP8x23 { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP8x23) -> FP8x23 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP8x23) -> FP8x23 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP8x23) -> FP8x23 { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP8x23) -> FP8x23 { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP8x23) -> FP8x23 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP8x23) -> FP8x23 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP8x23) -> FP8x23 { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP8x23) -> FP8x23 { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP8x23) -> FP8x23 { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP8x23) -> FP8x23 { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP8x23) -> FP8x23 { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP8x23) -> FP8x23 { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP8x23) -> FP8x23 { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP8x23, b: FP8x23) -> FP8x23 { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP8x23) -> FP8x23 { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP8x23) -> FP8x23 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP8x23) -> FP8x23 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP8x23) -> FP8x23 { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP8x23) -> FP8x23 { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP8x23) -> FP8x23 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP8x23) -> FP8x23 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP8x23) -> FP8x23 { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP8x23) -> FP8x23 { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP8x23 { - return FP8x23 { mag: 0, sign: true }; + FP8x23 { mag: 0, sign: true } } fn is_nan(self: FP8x23) -> bool { @@ -195,15 +191,15 @@ impl FP8x23Impl of FixedTrait { } fn INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: false }; + FP8x23 { mag: 4294967295, sign: false } } fn POS_INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: false }; + FP8x23 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP8x23 { - return FP8x23 { mag: 4294967295, sign: true }; + FP8x23 { mag: 4294967295, 
sign: true } } fn is_inf(self: FP8x23) -> bool { @@ -219,11 +215,10 @@ impl FP8x23Impl of FixedTrait { } fn erf(self: FP8x23) -> FP8x23 { - return erf::erf(self); + erf::erf(self) } } - impl FP8x23Print of PrintTrait { fn print(self: FP8x23) { self.sign.print(); @@ -237,9 +232,9 @@ impl FP8x23IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -247,10 +242,10 @@ impl FP8x23IntoFelt252 of Into { impl FP8x23TryIntoU128 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -258,10 +253,10 @@ impl FP8x23TryIntoU128 of TryInto { impl FP8x23TryIntoU64 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -269,10 +264,10 @@ impl FP8x23TryIntoU64 of TryInto { impl FP8x23TryIntoU32 of TryInto { fn try_into(self: FP8x23) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some(self.mag / ONE); + Option::Some(self.mag / ONE) } } } @@ -283,7 +278,7 @@ impl FP8x23TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -294,7 +289,7 @@ impl FP8x23TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -314,18 +309,18 @@ impl FP8x23TryIntoI8 of TryInto { impl FP8x23PartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP8x23, rhs: @FP8x23) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP8x23Add of Add { fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -338,7 +333,7 @@ impl FP8x23AddEq of AddEq { impl FP8x23Sub of Sub { fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -351,7 +346,7 @@ impl FP8x23SubEq of SubEq { impl FP8x23Mul of Mul { fn mul(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -364,7 +359,7 @@ impl FP8x23MulEq of MulEq { impl FP8x23Div of Div { fn div(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -378,48 +373,49 @@ impl FP8x23DivEq of DivEq { impl FP8x23PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP8x23, rhs: FP8x23) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP8x23Neg of Neg { #[inline(always)] fn neg(a: FP8x23) -> FP8x23 { - return core_math::neg(a); + core_math::neg(a) } 
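// Editor's note (illustrative sketch, not part of the patch): FP8x23 is a
// sign-magnitude fixed-point type, so negation in effect only toggles the
// `sign` flag and leaves `mag` untouched (see `core_math::neg`). A minimal
// usage sketch, assuming only the API defined in this file (`new_unscaled`,
// the `Neg` impl above); `neg_example` is a hypothetical name — uncomment
// into a test module to run:
//
//     fn neg_example() {
//         let a = FixedTrait::<FP8x23>::new_unscaled(2, false); // 2.0
//         let b = -a; // -2.0, via FP8x23Neg above
//         assert(b.mag == a.mag, 'mag unchanged');
//         assert(b.sign, 'sign flipped');
//     }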
} impl FP8x23Rem of Rem { #[inline(always)] fn rem(lhs: FP8x23, rhs: FP8x23) -> FP8x23 { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } /// INTERNAL - fn _i32_into_fp(x: FP8x23) -> i32 { // i32 { mag: x.mag / ONE, sign: x.sign } let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); + if x.sign { return number_i32 * -1_i32; } + number_i32 } @@ -430,9 +426,11 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); + if x.sign { return Option::Some(number_i8 * -1_i8); } + Option::Some(number_i8) }, Option::None => Option::None(()) diff --git a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo index 58e0ca344..9b6136501 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, TWO, FP8x23, FP8x23Sub, FP8x23Div, FixedTrait, FP8x23Print diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo index 366f385e8..81ab12895 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo @@ -28,6 +28,7 @@ fn xor(a: FP8x23, b: FP8x23) -> bool { fn or(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); + if a == zero && b == zero { return false; } else { @@ -37,6 +38,7 @@ fn or(a: FP8x23, b: FP8x23) -> bool { fn and(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); + if a == zero || b == zero { return false; } else { @@ -53,15 +55,15 @@ fn where(a: FP8x23, b: FP8x23, c: FP8x23) -> FP8x23 { } fn bitwise_and(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP8x23, b: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -107,6 +109,7 @@ mod tests { assert(min(c, b) == c, 'min(c, b)'); assert(min(c, c) == c, 'min(c, c)'); } + #[test] fn test_bitwise_and() { let a = FixedTrait::new(28835840, false); // 3.4375 @@ -124,6 +127,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(28835840, false); // 3.4375 let b = FixedTrait::new(1639448576, true); // -60.5625 diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo index 0e0b3fa48..60b98877a 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u32_safe_divmod, 
u32_as_non_zero, u32_wide_mul}; use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, MAX, FP8x23, FP8x23Add, FP8x23Impl, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ use orion::numbers::fixed_point::implementations::fp8x23::math::lut; // PUBLIC - fn abs(a: FP8x23) -> FP8x23 { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP8x23, b: FP8x23) -> FP8x23 { @@ -35,7 +29,7 @@ fn add(a: FP8x23, b: FP8x23) -> FP8x23 { } fn ceil(a: FP8x23) -> FP8x23 { - let (div, rem) = u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { return a; @@ -53,16 +47,16 @@ fn div(a: FP8x23, b: FP8x23) -> FP8x23 { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP8x23, b: @FP8x23) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP8x23) -> FP8x23 { - return exp2(FixedTrait::new(12102203, false) * a); // log2(e) * 2^23 ≈ 12102203 + exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 ≈ 12102203 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP8x23) -> FP8x23 { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -88,7 +82,7 @@ fn exp2(a: FP8x23) -> FP8x23 { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { + if a.sign { return FixedTrait::ONE() / res_u; } else { return res_u; @@ -96,11 +90,11 @@ fn exp2(a: FP8x23) -> FP8x23 { } fn exp2_int(exp: u32) -> FP8x23 { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP8x23) -> FP8x23 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { return a; @@ -138,13 +132,13 @@ fn le(a: FP8x23, b: FP8x23) -> bool { // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP8x23) -> FP8x23 { - return FixedTrait::new(5814540, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693... } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(a: FP8x23) -> FP8x23 { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); if (a.mag == ONE) { return FixedTrait::ZERO(); @@ -176,7 +170,7 @@ fn log2(a: FP8x23) -> FP8x23 { // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP8x23) -> FP8x23 { - return FixedTrait::new(2525223, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301... 
} fn lt(a: FP8x23, b: FP8x23) -> bool { @@ -191,11 +185,11 @@ fn mul(a: FP8x23, b: FP8x23) -> FP8x23 { let prod_u128 = integer::u32_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP8x23, b: @FP8x23) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP8x23) -> FP8x23 { @@ -212,7 +206,7 @@ fn neg(a: FP8x23) -> FP8x23 { // self is a FP8x23 point value // b is a FP8x23 point value fn pow(a: FP8x23, b: FP8x23) -> FP8x23 { - let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -220,7 +214,7 @@ fn pow(a: FP8x23, b: FP8x23) -> FP8x23 { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calclates the value of a^b and checks for overflow before returning @@ -228,7 +222,7 @@ fn pow_int(a: FP8x23, b: u32, sign: bool) -> FP8x23 { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -239,11 +233,7 @@ fn pow_int(a: FP8x23, b: u32, sign: bool) -> FP8x23 { let mut y = FixedTrait::ONE(); let two = integer::u32_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u32_safe_divmod(n, two); if rem == 1 { @@ -254,15 +244,15 @@ fn pow_int(a: FP8x23, b: u32, sign: bool) -> FP8x23 { n = div; }; - return x * y; + x * y } fn rem(a: FP8x23, b: FP8x23) -> FP8x23 { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP8x23) -> FP8x23 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if (HALF <= rem) { return FixedTrait::new_unscaled(div + 1, a.sign); @@ -274,14 +264,15 @@ fn round(a: FP8x23) -> FP8x23 { // Calculates the square root of a FP8x23 point value // x must be positive fn sqrt(a: FP8x23) -> FP8x23 { - assert(a.sign == false, 'must be positive'); + assert(!(a.sign), 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP8x23, b: FP8x23) -> FP8x23 { - return add(a, -b); + add(a, -b) } fn sign(a: FP8x23) -> FP8x23 { @@ -388,7 +379,6 @@ mod tests { assert(sqrt(a).mag == 5 * ONE, 'invalid pos root'); } - #[test] #[available_gas(100000)] fn test_msb() { @@ -472,7 +462,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -480,7 +470,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -554,12 +544,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -572,17 +562,17 @@ mod tests { 
let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -595,12 +585,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -610,17 +600,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo index 8121e170b..5e05783a2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/erf.cairo @@ -1,4 +1,3 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE, FP8x23, FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::math::lut::erf_lut; @@ -21,5 +20,6 @@ fn erf(x: FP8x23) -> FP8x23 { } else { erf_value = ONE; } + FP8x23 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo index e2059d848..16d427366 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo @@ -1,4 +1,5 @@ use core::debug::PrintTrait; + use orion::numbers::fixed_point::implementations::fp8x23::core::{ HALF, ONE, TWO, FP8x23, FP8x23Impl, FP8x23Add, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq, FP8x23TryIntoU128, FP8x23PartialEq, FP8x23PartialOrd, FP8x23SubEq, FP8x23Neg, FP8x23Div, @@ -8,48 +9,51 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP8x23) -> FP8x23 { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP8x23) -> FP8x23 { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP8x23) -> FP8x23 { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse 
hyperbolic cosine of a (fixed point) fn acosh(a: FP8x23) -> FP8x23 { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP8x23) -> FP8x23 { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP8x23) -> FP8x23 { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo index fdb9dfea3..27136c99a 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/lut.cairo @@ -29,7 +29,7 @@ fn msb(whole: u32) -> (u32, u32) { } } - return (8, 256); + (8, 256) } fn exp2(exp: u32) -> u32 { @@ -106,7 +106,7 @@ fn exp2(exp: u32) -> u32 { } } - return 8388608; + 8388608 } fn sin(a: u32) -> (u32, u32, u32) { @@ -923,7 +923,7 @@ fn sin(a: u32) -> (u32, u32, u32) { } } - return (13125323, 8388450, 8388608); + (13125323, 8388450, 8388608) } fn atan(a: u32) -> (u32, u32, u32) { @@ -1227,7 +1227,7 @@ fn atan(a: u32) -> (u32, u32, u32) { return (5754585, 5043802, 5083601); } - return (5813305, 5083601, 5123141); + (5813305, 5083601, 5123141) } fn erf_lut(x: u32) -> u32 { @@ -1919,5 +1919,6 @@ fn erf_lut(x: u32) -> u32 { return 8388595; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo index 11aec54ad..0adddc539 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u32_safe_divmod, u32_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp8x23::math::lut; use orion::numbers::fixed_point::implementations::fp8x23::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ }; // CONSTANTS - const TWO_PI: u32 = 52707178; const PI: u32 = 26353589; const HALF_PI: u32 = 13176795; @@ -48,7 +45,8 @@ fn asin(a: FP8x23) -> FP8x23 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP8x23) -> FP8x23 { @@ -57,7 +55,8 @@ fn asin_fast(a: FP8x23) -> FP8x23 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,7 +99,7 @@ fn atan(a: FP8x23) -> FP8x23 { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } fn atan_fast(a: FP8x23) -> FP8x23 { @@ -134,31 +133,32 @@ fn atan_fast(a: FP8x23) -> FP8x23 { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + 
FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP8x23) -> FP8x23 { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP8x23) -> FP8x23 { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP8x23) -> FP8x23 { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP8x23) -> FP8x23 { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -178,14 +178,16 @@ fn tan(a: FP8x23) -> FP8x23 { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP8x23) -> FP8x23 { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -198,15 +200,13 @@ fn _sin_loop(a: FP8x23, i: u32, acc: FP8x23) -> FP8x23 { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index 1fac5453d..4804e6fda 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::{fixed_point::core::{FixedTrait}, FP8x23}; use orion::numbers::fixed_point::implementations::fp8x23wide::math::{ core as core_math, trig, hyp, erf @@ -18,178 +14,176 @@ struct FP8x23W { } // CONSTANTS - const TWO: u64 = 16777216; // 2 ** 24 const ONE: u64 = 8388608; // 2 ** 23 const HALF: u64 = 4194304; // 2 ** 22 const MAX: u64 = 2147483648; // 2 ** 31 - impl FP8x23WImpl of FixedTrait { fn ZERO() -> FP8x23W { - return FP8x23W { mag: 0, sign: false }; + FP8x23W { mag: 0, sign: false } } fn HALF() -> FP8x23W { - return FP8x23W { mag: HALF, sign: false }; + FP8x23W { mag: HALF, sign: false } } fn ONE() -> FP8x23W { - return FP8x23W { mag: ONE, sign: false }; + FP8x23W { mag: ONE, sign: false } } fn MAX() -> FP8x23W { - return FP8x23W { mag: MAX, sign: false }; + FP8x23W { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP8x23W { - return FP8x23W { mag: mag, sign: sign }; + FP8x23W { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP8x23W { - return FP8x23W { mag: mag * ONE, sign: sign }; + FP8x23W { mag: 
mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP8x23W { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP8x23W) -> FP8x23W { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP8x23W) -> FP8x23W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP8x23W) -> FP8x23W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP8x23W) -> FP8x23W { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP8x23W) -> FP8x23W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP8x23W) -> FP8x23W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP8x23W) -> FP8x23W { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP8x23W) -> FP8x23W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP8x23W) -> FP8x23W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP8x23W) -> FP8x23W { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP8x23W) -> FP8x23W { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP8x23W) -> FP8x23W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP8x23W) -> FP8x23W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP8x23W) -> FP8x23W { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP8x23W) -> FP8x23W { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP8x23W) -> FP8x23W { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP8x23W) -> FP8x23W { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP8x23W) -> FP8x23W { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP8x23W) -> FP8x23W { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP8x23W) -> FP8x23W { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP8x23W) -> FP8x23W { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP8x23W) -> FP8x23W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP8x23W) -> FP8x23W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP8x23W) -> FP8x23W { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP8x23W) -> FP8x23W { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP8x23W) -> FP8x23W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP8x23W) -> FP8x23W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP8x23W) -> FP8x23W { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP8x23W) -> FP8x23W { - return core_math::sign(self); + 
core_math::sign(self) } fn NaN() -> FP8x23W { - return FP8x23W { mag: 0, sign: true }; + FP8x23W { mag: 0, sign: true } } fn is_nan(self: FP8x23W) -> bool { @@ -197,15 +191,15 @@ impl FP8x23WImpl of FixedTrait { } fn INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: false }; + FP8x23W { mag: 4294967295, sign: false } } fn POS_INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: false }; + FP8x23W { mag: 4294967295, sign: false } } fn NEG_INF() -> FP8x23W { - return FP8x23W { mag: 4294967295, sign: true }; + FP8x23W { mag: 4294967295, sign: true } } fn is_inf(self: FP8x23W) -> bool { @@ -221,7 +215,7 @@ impl FP8x23WImpl of FixedTrait { } fn erf(self: FP8x23W) -> FP8x23W { - return erf::erf(self); + erf::erf(self) } } @@ -239,9 +233,9 @@ impl FP8x23WIntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -264,10 +258,10 @@ impl FP8x23WTryIntoFP8x23 of TryInto { impl FP8x23WTryIntoU128 of TryInto { fn try_into(self: FP8x23W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -275,10 +269,10 @@ impl FP8x23WTryIntoU128 of TryInto { impl FP8x23WTryIntoU64 of TryInto { fn try_into(self: FP8x23W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -289,7 +283,7 @@ impl FP8x23WTryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7 +294,7 @@ impl FP8x23WTryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -311,7 +305,7 @@ impl FP8x23WTryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -331,18 +325,18 @@ impl FP8x23WTryIntoI8 of TryInto { impl FP8x23WPartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP8x23WAdd of Add { fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -355,7 +349,7 @@ impl FP8x23WAddEq of AddEq { impl FP8x23WSub of Sub { fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -368,7 +362,7 @@ impl FP8x23WSubEq of SubEq { impl FP8x23WMul of Mul { fn mul(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -381,7 +375,7 @@ impl FP8x23WMulEq of MulEq { impl FP8x23WDiv of Div { fn div(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -395,36 +389,36 @@ impl FP8x23WDivEq of DivEq { impl FP8x23WPartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return 
core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP8x23W, rhs: FP8x23W) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP8x23WNeg of Neg { #[inline(always)] fn neg(a: FP8x23W) -> FP8x23W { - return core_math::neg(a); + core_math::neg(a) } } impl FP8x23WRem of Rem { #[inline(always)] fn rem(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } @@ -433,9 +427,11 @@ impl FP8x23WRem of Rem { fn _i32_into_fp(x: FP8x23W) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); + if x.sign { return number_i32 * -1_i32; } + number_i32 } @@ -446,9 +442,11 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); + if x.sign { return Option::Some(number_i8 * -1_i8); } + Option::Some(number_i8) }, Option::None => Option::None(()) diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo index b34475914..2ea2c1aff 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, TWO, FP8x23W, FP8x23WSub, FP8x23WDiv, FixedTrait, FP8x23WPrint diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo index b2dad2e6d..bb1c373ea 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo @@ -4,64 +4,64 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ fn max(a: FP8x23W, b: FP8x23W) -> FP8x23W { if (a >= b) { - return a; + a } else { - return b; + b } } fn min(a: FP8x23W, b: FP8x23W) -> FP8x23W { if (a <= b) { - return a; + a } else { - return b; + b } } fn xor(a: FP8x23W, b: FP8x23W) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP8x23W, b: FP8x23W) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP8x23W, b: FP8x23W) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP8x23W, b: FP8x23W, c: FP8x23W) -> FP8x23W { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests 
-------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP8x23W, b: FP8x23W) -> FP8x23W { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -126,6 +125,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(28835840, false); // 3.4375 let b = FixedTrait::new(1639448576, true); // -60.5625 diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo index 3d89ccce0..54d23b9e2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul}; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, MAX, FP8x23W, FP8x23WAdd, FP8x23WImpl, FP8x23WAddEq, FP8x23WSub, FP8x23WMul, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut; // PUBLIC - fn abs(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP8x23W, b: FP8x23W) -> FP8x23W { @@ -35,7 +29,7 @@ fn add(a: FP8x23W, b: FP8x23W) -> FP8x23W { } fn ceil(a: FP8x23W) -> FP8x23W { - let (div, rem) = u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { return a; @@ -53,16 +47,16 @@ fn div(a: FP8x23W, b: FP8x23W) -> FP8x23W { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP8x23W, b: @FP8x23W) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP8x23W) -> FP8x23W { - return exp2(FixedTrait::new(12102203, false) * a); // log2(e) * 2^23 ≈ 12102203 + exp2(FixedTrait::new(12102203, false) * a) // log2(e) * 2^23 ≈ 12102203 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP8x23W) -> FP8x23W { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -88,19 +82,19 @@ fn exp2(a: FP8x23W) -> FP8x23W { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u64) -> FP8x23W { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP8x23W) -> FP8x23W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { return a; @@ -113,38 +107,38 @@ fn floor(a: 
FP8x23W) -> FP8x23W { fn ge(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(5814540, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(5814540, false) * log2(a) // ln(2) = 0.693... } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(a: FP8x23W) -> FP8x23W { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); if (a.mag == ONE) { return FixedTrait::ZERO(); @@ -158,7 +152,7 @@ fn log2(a: FP8x23W) -> FP8x23W { let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(76243, true) * norm; @@ -169,21 +163,22 @@ fn log2(a: FP8x23W) -> FP8x23W { let r3 = (r4 + FixedTrait::new(77896489, false)) * norm; let r2 = (r3 + FixedTrait::new(83945943, true)) * norm; let r1 = (r2 + FixedTrait::new(68407458, false)) * norm; - return r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP8x23W) -> FP8x23W { - return FixedTrait::new(2525223, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(2525223, false) * log2(a) // log10(2) = 0.301... 
} fn lt(a: FP8x23W, b: FP8x23W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -191,20 +186,20 @@ fn mul(a: FP8x23W, b: FP8x23W) -> FP8x23W { let prod_u128 = integer::u64_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP8x23W, b: @FP8x23W) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP8x23W) -> FP8x23W { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -212,7 +207,7 @@ fn neg(a: FP8x23W) -> FP8x23W { // self is a FP8x23W point value // b is a FP8x23W point value fn pow(a: FP8x23W, b: FP8x23W) -> FP8x23W { - let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -220,7 +215,7 @@ fn pow(a: FP8x23W, b: FP8x23W) -> FP8x23W { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calclates the value of a^b and checks for overflow before returning @@ -228,7 +223,7 @@ fn pow_int(a: FP8x23W, b: u64, sign: bool) -> FP8x23W { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -239,11 +234,7 @@ fn pow_int(a: FP8x23W, b: u64, sign: bool) -> FP8x23W { let mut y = FixedTrait::ONE(); let two = integer::u64_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u64_safe_divmod(n, two); if rem == 1 { @@ -254,34 +245,35 @@ fn pow_int(a: FP8x23W, b: u64, sign: bool) -> FP8x23W { n = div; }; - return x * y; + x * y } fn rem(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP8x23W) -> FP8x23W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } // Calculates the square root of a FP8x23W point value // x must be positive fn sqrt(a: FP8x23W) -> FP8x23W { - assert(a.sign == false, 'must be positive'); + assert(!a.sign, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP8x23W, b: FP8x23W) -> FP8x23W { - return add(a, -b); + add(a, -b) } fn sign(a: FP8x23W) -> FP8x23W { @@ -472,7 +464,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -480,7 +472,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -554,12 +546,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, 
true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -572,17 +564,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -595,12 +587,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -610,17 +602,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo index 83f33f9ad..ec741bff9 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/erf.cairo @@ -1,4 +1,3 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ONE, FP8x23W, FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut::erf_lut; @@ -21,5 +20,6 @@ fn erf(x: FP8x23W) -> FP8x23W { } else { erf_value = ONE; } + FP8x23W { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo index 848f711a2..928de48c8 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo @@ -1,4 +1,3 @@ -use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ HALF, ONE, TWO, FP8x23W, FP8x23WImpl, FP8x23WAdd, FP8x23WAddEq, FP8x23WSub, FP8x23WMul, FP8x23WMulEq, FP8x23WTryIntoU128, FP8x23WPartialEq, FP8x23WPartialOrd, FP8x23WSubEq, FP8x23WNeg, @@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed 
point) fn sinh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP8x23W) -> FP8x23W { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP8x23W) -> FP8x23W { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP8x23W) -> FP8x23W { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP8x23W) -> FP8x23W { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo index eea11e46a..20b9a2f3b 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/lut.cairo @@ -29,7 +29,7 @@ fn msb(whole: u64) -> (u64, u64) { } } - return (8, 256); + (8, 256) } fn exp2(exp: u64) -> u64 { @@ -106,7 +106,7 @@ fn exp2(exp: u64) -> u64 { } } - return 8388608; + 8388608 } fn sin(a: u64) -> (u64, u64, u64) { @@ -923,7 +923,7 @@ fn sin(a: u64) -> (u64, u64, u64) { } } - return (13125323, 8388450, 8388608); + (13125323, 8388450, 8388608) } fn atan(a: u64) -> (u64, u64, u64) { @@ -1227,7 +1227,7 @@ fn atan(a: u64) -> (u64, u64, u64) { return (5754585, 5043802, 5083601); } - return (5813305, 5083601, 5123141); + (5813305, 5083601, 5123141) } fn erf_lut(x: u64) -> u64 { @@ -1919,5 +1919,6 @@ fn erf_lut(x: u64) -> u64 { return 8388595; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo index f2074215c..5d3055640 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u64_safe_divmod, u64_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ }; // CONSTANTS - const TWO_PI: u64 = 52707178; const PI: u64 = 26353589; const HALF_PI: u64 = 13176795; @@ -23,9 +20,9 @@ fn acos(a: FP8x23W) -> FP8x23W { let asin_res = asin(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -34,9 +31,9 @@ fn 
acos_fast(a: FP8x23W) -> FP8x23W { let asin_res = asin_fast(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP8x23W) -> FP8x23W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP8x23W) -> FP8x23W { @@ -57,7 +55,8 @@ fn asin_fast(a: FP8x23W) -> FP8x23W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,7 +99,7 @@ fn atan(a: FP8x23W) -> FP8x23W { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } fn atan_fast(a: FP8x23W) -> FP8x23W { @@ -134,31 +133,32 @@ fn atan_fast(a: FP8x23W) -> FP8x23W { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP8x23W) -> FP8x23W { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP8x23W) -> FP8x23W { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP8x23W) -> FP8x23W { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP8x23W) -> FP8x23W { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -170,7 +170,7 @@ fn sin_fast(a: FP8x23W) -> FP8x23W { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -178,14 +178,16 @@ fn tan(a: FP8x23W) -> FP8x23W { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP8x23W) -> FP8x23W { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -198,15 +200,13 @@ fn _sin_loop(a: FP8x23W, i: u64, acc: FP8x23W) -> FP8x23W { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/utils.cairo b/src/numbers/fixed_point/utils.cairo index c15b28690..b680548e5 100644 --- 
a/src/numbers/fixed_point/utils.cairo +++ b/src/numbers/fixed_point/utils.cairo @@ -7,7 +7,7 @@ const HALF_PRIME: felt252 = // true = negative // false = positive fn felt_sign(a: felt252) -> bool { - return integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME); + integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME) } // Returns the absolute value of a signed `felt252` diff --git a/src/operators/matrix.cairo b/src/operators/matrix.cairo index 755e13ce4..5e7564d11 100644 --- a/src/operators/matrix.cairo +++ b/src/operators/matrix.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; @@ -36,10 +33,10 @@ impl MutMatrixImpl< /// Get the value at (row, col) fn at(ref self: MutMatrix, row: usize, col: usize) -> T { - return match self.get(row, col) { + match self.get(row, col) { Option::Some(val) => val, Option::None => NumberTrait::zero(), - }; + } } /// Performs the product between a m x n `MutMatrix` and a n x 1 `NullableVec`. @@ -48,36 +45,34 @@ impl MutMatrixImpl< ref self: MutMatrix, ref vec: NullableVec ) -> NullableVec { assert(self.cols == vec.len, 'wrong matrix shape for dot'); + let m = self.rows; let n = self.cols; let mut result_vec = VecTrait::new(); let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } + while k != n { sum += MutMatrixImpl::at(ref self, i, k) * VecTrait::at(ref vec, k); + k += 1; }; - VecTrait::set(ref result_vec, i, sum); + VecTrait::set(ref result_vec, i, sum); i += 1; }; - return result_vec; + + result_vec } /// Set the value at (row, col) fn set(ref self: MutMatrix, row: usize, col: usize, value: T) { if row < self.rows && col < self.cols { let index = row * self.cols + col; + self.data.set(index, value) } } @@ -92,13 +87,10 @@ impl MutMatrixImpl< assert(axis < 2, 'Invalid axis'); let mut result: Array = ArrayTrait::new(); + if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut max_value = self.get(0, col); let mut max_value = match max_value { Option::Some => { max_value.unwrap() }, @@ -107,16 +99,13 @@ impl MutMatrixImpl< let mut max_index = 0; let mut row: usize = 1; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut value = self.get(row, col); let mut value = match value { Option::Some => { value.unwrap() }, Option::None => { NumberTrait::min_value() } }; + if value > max_value { max_value = value; max_index = row; @@ -126,7 +115,6 @@ impl MutMatrixImpl< }; result.append(max_index); - col += 1; }; @@ -134,11 +122,7 @@ impl MutMatrixImpl< } let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut max_value = self.get(row, 0); let mut max_value = match max_value { Option::Some => { max_value.unwrap() }, @@ -147,16 +131,13 @@ impl MutMatrixImpl< let mut max_index = 0; let mut col: usize = 1; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut value = self.get(row, col); let mut value = match value { Option::Some => { value.unwrap() }, Option::None => { NumberTrait::min_value() } }; + if value > max_value { max_value = value; max_index = col; @@ -166,11 +147,10 @@ impl MutMatrixImpl< }; result.append(max_index); - row += 1; }; - return result.span(); + result.span() } /// Apply softmax to the matrix 
along the specified axis @@ -181,18 +161,10 @@ impl MutMatrixImpl< if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut sum_exp = NumberTrait::zero(); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); sum_exp += value.exp(); @@ -200,11 +172,7 @@ impl MutMatrixImpl< }; row = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -216,18 +184,10 @@ impl MutMatrixImpl< }; } else { let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut sum_exp = NumberTrait::zero(); let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); sum_exp += value.exp(); @@ -235,11 +195,7 @@ impl MutMatrixImpl< }; col = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -264,32 +220,23 @@ impl MutMatrixImpl< if axis == 0 { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let mut sum_exp = NumberTrait::zero(); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); + if value != NumberTrait::zero() { sum_exp += value.exp(); } + row += 1; }; row = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let value = self.get(row, col).unwrap().into(); + if value != NumberTrait::zero() { let softmax_value = (value.exp() / sum_exp).into(); result.set(row, col, softmax_value); @@ -304,31 +251,20 @@ impl MutMatrixImpl< }; } else { let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut sum_exp = NumberTrait::zero(); let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); if value != NumberTrait::zero() { sum_exp += value.exp(); } + col += 1; }; col = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col).unwrap().into(); if value != NumberTrait::zero() { @@ -353,18 +289,11 @@ impl MutMatrixImpl< let mut result = MutMatrixImpl::new(self.rows, self.cols); let mut row: usize = 0; - loop { - if row == self.rows { - break; - } - + while row != self.rows { let mut col: usize = 0; - loop { - if col == self.cols { - break; - } - + while col != self.cols { let value = self.get(row, col); + if value.is_some() { let value = NumberTrait::one() / (NumberTrait::one() + (value.unwrap() * NumberTrait::neg_one()).exp()); diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index f894937dc..263c4abc1 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{ constant_of_shape, new_tensor, stride, 
Tensor, TensorTrait, ravel_index, unravel_index, reshape, @@ -37,7 +32,6 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn min_in_tensor(self: @Tensor) -> bool { panic(array!['not supported!']) } @@ -321,13 +315,13 @@ impl BoolTensor of TensorTrait { ) -> Tensor:: { panic(array!['not supported!']) } + fn qlinear_leakyrelu( self: @Tensor, a_scale: @Tensor, a_zero_point: @Tensor, alpha: bool, ) -> Tensor:: { panic(array!['not supported!']) } - fn round(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -492,26 +486,26 @@ impl BoolTensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ - panic(array!['not supported!']) + ) -> (Tensor::, Tensor::, Tensor) { + panic(array!['not supported!']) } fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { panic(array!['not supported!']) } @@ -532,7 +526,9 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } } @@ -555,15 +551,10 @@ impl BoolTryIntobool of TryInto { } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -571,13 +562,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index e035b6a64..6fb8c8433 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{ new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, @@ -17,7 +12,6 @@ use orion::operators::tensor::implementations::{ use orion::numbers::complex_number::complex_trait::ComplexTrait; use orion::numbers::complex_number::complex64::{Complex64Impl, complex64}; - impl Complex64Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -461,7 +455,6 @@ impl Complex64Tensor of TensorTrait { math::reduce_log_sum::reduce_log_sum(self, axis, keepdims) } - fn erf(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -499,7 +492,10 @@ impl Complex64Tensor of 
TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } @@ -522,10 +518,15 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, + high: Option, + low: Option, + seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: complex64, end: complex64, step: complex64) -> Tensor { panic(array!['not supported!']) } @@ -541,17 +542,17 @@ impl Complex64Tensor of TensorTrait { fn blackman_window(size: complex64, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor ) -> (Tensor::, Tensor::, Tensor) { @@ -639,22 +640,17 @@ impl Complex64TensorPartialEq of PartialEq> { } } - // Internals - fn eq(lhs: @complex64, rhs: @complex64) -> bool { let eq = (*lhs.real == *rhs.real) && (*lhs.img == *rhs.img); + eq } fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -662,14 +658,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 4dd2dd8d3..374d9a740 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -442,7 +437,6 @@ impl FP16x16Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -562,10 +556,12 @@ impl FP16x16Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP16x16, end: FP16x16, step: FP16x16) -> Tensor { math::range::range(start, end, step) } @@ -581,7 +577,7 @@ impl FP16x16Tensor of TensorTrait { fn blackman_window(size: FP16x16, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP16x16 { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( 
self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,29 +585,30 @@ impl FP16x16Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -703,22 +700,22 @@ impl TensorI8IntoTensorFP16x16 of Into, Tensor> { impl FP16x16TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } @@ -741,11 +738,7 @@ fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -753,28 +746,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp16x16(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 6fb17db32..cdfd3c5aa 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -408,7 +403,6 @@ impl FP16x16WTensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -514,10 +508,15 @@ impl FP16x16WTensor of TensorTrait { 
manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, + high: Option, + low: Option, + seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP16x16W, end: FP16x16W, step: FP16x16W) -> Tensor { math::range::range(start, end, step) } @@ -533,37 +532,38 @@ impl FP16x16WTensor of TensorTrait { fn blackman_window(size: FP16x16W, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP16x16W { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -655,26 +655,25 @@ impl U32TryIntoU32 of TryInto { impl FP16x16WTensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } - // Internals const PRECISION: u64 = 589; // 0.009 @@ -690,15 +689,10 @@ fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { rel_diff <= PRECISION } - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -706,14 +700,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index e2380b70a..a866e6f71 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use 
orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -442,7 +437,6 @@ impl FP32x32Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -562,10 +556,12 @@ impl FP32x32Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP32x32, end: FP32x32, step: FP32x32) -> Tensor { math::range::range(start, end, step) } @@ -581,7 +577,7 @@ impl FP32x32Tensor of TensorTrait { fn blackman_window(size: FP32x32, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,27 +585,30 @@ impl FP32x32Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } - + fn scatter_nd( self: @Tensor, updates: Tensor, @@ -731,9 +730,7 @@ impl FP32x32TensorPartialOrd of PartialOrd> { } } - // Internals - const PRECISION: u64 = 75497; // 0.009 fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { @@ -751,11 +748,7 @@ fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -763,28 +756,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp32x32(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 982ef5860..6424f1839 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use 
core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -442,7 +437,6 @@ impl FP64x64Tensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -562,10 +556,12 @@ impl FP64x64Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP64x64, end: FP64x64, step: FP64x64) -> Tensor { math::range::range(start, end, step) } @@ -581,7 +577,7 @@ impl FP64x64Tensor of TensorTrait { fn blackman_window(size: FP64x64, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -589,26 +585,28 @@ impl FP64x64Tensor of TensorTrait { } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } fn scatter_nd( @@ -713,27 +711,26 @@ impl TensorI8IntoTensorFP64x64 of Into, Tensor> { impl FP64x64TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - const PRECISION: u128 = 1660000000000000; // 9e-05 fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { @@ -751,11 +748,7 @@ fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -763,28 +756,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp64x64(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data =
*x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index b38c70e65..ee417a5ca 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -340,7 +335,6 @@ impl FP8x23Tensor of TensorTrait { ) } - fn slice( self: @Tensor, starts: Span, @@ -462,7 +456,6 @@ impl FP8x23Tensor of TensorTrait { panic(array!['not supported!']) } - fn reduce_min( self: @Tensor, axes: Option>, @@ -561,10 +554,12 @@ impl FP8x23Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP8x23, end: FP8x23, step: FP8x23) -> Tensor { math::range::range(start, end, step) } @@ -580,36 +575,36 @@ impl FP8x23Tensor of TensorTrait { fn blackman_window(size: FP8x23, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP8x23 { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( - self: @Tensor, - axis: usize, - keepdims: usize, - split: Option> + self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(255, false), NumberTrait::new_unscaled(0, false), NumberTrait::new_unscaled(1, false), - ) + ) } fn scatter_nd( @@ -727,27 +722,26 @@ impl TensorI8IntoTensorFP8x23 of Into, Tensor> { impl FP8x23TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - 
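// NOTE: the tolerance below is 75497 / 2^23 ≈ 0.009, so this file's relative_eq
// accepts fixed-point values within roughly 0.9% of each other as equal when
// comparing test tensors. A minimal, hypothetical illustration (it assumes
// relative_eq checks the relative difference (lhs - rhs) / lhs against
// PRECISION, as the relative_eq hunk further below sketches):
#[test]
fn relative_eq_tolerance_sketch() {
    let a: FP8x23 = FixedTrait::new(8388608, false); // 1.0 at 2^23 scaling
    let b: FP8x23 = FixedTrait::new(8430551, false); // ~1.005, inside tolerance
    assert(relative_eq(@a, @b), 'within 0.9% tolerance');
}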
const PRECISION: u32 = 75497; // 0.009 fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { @@ -765,11 +759,7 @@ fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -777,28 +767,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_fp8x23(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 51456e6fc..26852334a 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -394,7 +389,6 @@ impl FP8x23WTensor of TensorTrait { panic(array!['not supported!']) } - fn gather_elements( self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { @@ -500,10 +494,12 @@ impl FP8x23WTensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) } - + fn range(start: FP8x23W, end: FP8x23W, step: FP8x23W) -> Tensor { math::range::range(start, end, step) } @@ -519,26 +515,29 @@ impl FP8x23WTensor of TensorTrait { fn blackman_window(size: FP8x23W, periodic: Option) -> Tensor { math::blackman_window::blackman_window(size, FP8x23W { mag: PI, sign: false }, periodic) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - + fn dynamic_quantize_linear( self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + ) -> (Tensor::, Tensor::, Tensor) { quantization::dynamic_quantize_linear::dynamic_quantize_linear( self, NumberTrait::new_unscaled(0, false), @@ -663,27 +662,26 @@ impl U32TryIntoU32 of TryInto { impl FP8x23WTensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: 
Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - const PRECISION: u64 = 75497; // 0.009 fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { @@ -701,11 +699,7 @@ fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -713,14 +707,10 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index a756ea72f..be111d6c8 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; @@ -16,7 +11,6 @@ use orion::operators::tensor::implementations::{ tensor_u32::U32Tensor, tensor_i8::I8Tensor, tensor_bool::BoolTensor }; - impl I32Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -536,10 +530,12 @@ impl I32Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: i32, end: i32, step: i32) -> Tensor { math::range::range(start, end, step) } @@ -555,35 +551,32 @@ impl I32Tensor of TensorTrait { fn blackman_window(size: i32, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } - + fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - - fn optional(self: @Tensor) -> Option>{ - manipulation::optional::optional(self) + + fn optional(self: @Tensor) -> Option> { + manipulation::optional::optional(self) } - - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) { panic(array!['not 
supported!']) } - + fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } @@ -676,35 +669,30 @@ impl TensorI8IntoTensorI32 of Into, Tensor> { impl I32TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -712,28 +700,20 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } fn tensor_i8_to_tensor_i32(x: @Tensor) -> Tensor { let mut result_data = ArrayTrait::::new(); let mut data = *x.data; - loop { + while data.len() != 0 { result_data.append((*data.pop_front().unwrap()).into()); - - if data.len() == 0 { - break (); - }; }; - return TensorTrait::new(*x.shape, result_data.span()); + TensorTrait::new(*x.shape, result_data.span()) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 38d12dbe4..bb90c304c 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::{TryInto, Into}; - use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; @@ -338,7 +333,6 @@ impl I8Tensor of TensorTrait { ) } - fn slice( self: @Tensor, starts: Span, @@ -560,7 +554,6 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -668,35 +661,30 @@ impl I8TensorPartialEq of PartialEq> { impl I8TensorPartialOrd of PartialOrd> { #[inline(always)] fn ge(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::ge(lhs.data, rhs.data); + SpanPartialOrd::ge(lhs.data, rhs.data) } #[inline(always)] fn gt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::gt(lhs.data, rhs.data); + SpanPartialOrd::gt(lhs.data, rhs.data) } #[inline(always)] fn le(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::le(lhs.data, rhs.data); + SpanPartialOrd::le(lhs.data, rhs.data) } #[inline(always)] fn lt(lhs: Tensor, rhs: Tensor) -> bool { - return SpanPartialOrd::lt(lhs.data, rhs.data); + SpanPartialOrd::lt(lhs.data, rhs.data) } } // Internals - fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = 
true;
 
-    loop {
-        if lhs.shape.len() == 0 || !is_eq {
-            break;
-        }
-
+    while lhs.shape.len() != 0 && is_eq {
         is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
     };
 
@@ -704,13 +692,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool {
         return false;
     }
 
-    loop {
-        if lhs.data.len() == 0 || !is_eq {
-            break;
-        }
-
+    while lhs.data.len() != 0 && is_eq {
         is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
     };
 
-    return is_eq;
-}
+    is_eq
+}
\ No newline at end of file
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo
index 599681d13..9e21185f8 100644
--- a/src/operators/tensor/implementations/tensor_u32.cairo
+++ b/src/operators/tensor/implementations/tensor_u32.cairo
@@ -1,8 +1,3 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::{TryInto, Into};
-
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::helpers::SpanPartialOrd;
 use orion::operators::tensor::core::{
@@ -483,10 +478,12 @@ impl U32Tensor of TensorTrait {
         manipulation::split::split(self, axis, num_outputs, spl)
     }
 
-    fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor {
+    fn random_uniform_like(
+        tensor: @Tensor, high: Option, low: Option, seed: Option
+    ) -> Tensor {
         panic(array!['not supported!'])
     }
-    
+
     fn range(start: u32, end: u32, step: u32) -> Tensor {
         math::range::range(start, end, step)
     }
@@ -502,35 +499,32 @@ impl U32Tensor of TensorTrait {
     fn blackman_window(size: u32, periodic: Option) -> Tensor {
         panic(array!['not supported!'])
     }
-    
-    
+
     fn split_to_sequence(
         self: @Tensor, axis: usize, keepdims: usize, split: Option>
     ) -> Array> {
         manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
     }
-    
+
     fn reverse_sequence(
-        self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option
+        self: @Tensor,
+        sequence_lens: Tensor,
+        batch_axis: Option,
+        time_axis: Option
     ) -> Tensor {
         manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
     }
-    
+
     fn optional(self: @Tensor) -> Option> {
         manipulation::optional::optional(self)
     }
 
-    fn dynamic_quantize_linear(
-        self: @Tensor
-    ) -> (Tensor::, Tensor::, Tensor){
-        panic(array!['not supported!'])
+    fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) {
+        panic(array!['not supported!'])
     }
-    
+
     fn scatter_nd(
-        self: @Tensor,
-        updates: Tensor,
-        indices: Tensor,
-        reduction: Option
+        self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option
     ) -> Tensor {
         math::scatter_nd::scatter_nd(self, updates, indices, reduction)
     }
@@ -620,22 +614,22 @@ impl U32TryIntoI8 of TryInto {
 impl U32TensorPartialOrd of PartialOrd> {
     #[inline(always)]
     fn ge(lhs: Tensor, rhs: Tensor) -> bool {
-        return SpanPartialOrd::ge(lhs.data, rhs.data);
+        SpanPartialOrd::ge(lhs.data, rhs.data)
     }
 
     #[inline(always)]
     fn gt(lhs: Tensor, rhs: Tensor) -> bool {
-        return SpanPartialOrd::gt(lhs.data, rhs.data);
+        SpanPartialOrd::gt(lhs.data, rhs.data)
     }
 
     #[inline(always)]
     fn le(lhs: Tensor, rhs: Tensor) -> bool {
-        return SpanPartialOrd::le(lhs.data, rhs.data);
+        SpanPartialOrd::le(lhs.data, rhs.data)
     }
 
     #[inline(always)]
     fn lt(lhs: Tensor, rhs: Tensor) -> bool {
-        return SpanPartialOrd::lt(lhs.data, rhs.data);
+        SpanPartialOrd::lt(lhs.data, rhs.data)
    }
 }
 
@@ -644,11 +638,7 @@ impl U32TensorPartialOrd of PartialOrd> {
 fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool {
     let mut is_eq = true;
- loop { - if lhs.shape.len() == 0 || !is_eq { - break; - } - + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -656,13 +646,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - loop { - if lhs.data.len() == 0 || !is_eq { - break; - } - + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; - return is_eq; + is_eq } diff --git a/src/operators/tensor/linalg/matmul.cairo b/src/operators/tensor/linalg/matmul.cairo index 5be41efa5..fd604fe42 100644 --- a/src/operators/tensor/linalg/matmul.cairo +++ b/src/operators/tensor/linalg/matmul.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -32,6 +28,7 @@ fn matmul< let mut result_data = ArrayTrait::new(); result_shape.append(1); result_data.append(dot); + return TensorTrait::new(result_shape.span(), result_data.span()); } @@ -42,7 +39,7 @@ fn matmul< let result_shape = adjust_output_shape_after_matmul(result.shape, self_ndim, other_ndim); - return TensorTrait::new(result_shape, result.data); + TensorTrait::new(result_shape, result.data) } /// Computes the dot product of two 1-dimensional i32 tensors. @@ -82,7 +79,7 @@ fn dot_product< }; }; - return result; + result } @@ -116,30 +113,16 @@ fn matrix_multiply< let n = *mat1_shape[1]; let p = *mat2_shape[1]; - let mut result_data = ArrayTrait::new(); - let mut result_shape = ArrayTrait::new(); - result_shape.append(m); - result_shape.append(p); + let mut result_data: Array = array![]; + let mut result_shape: Array = array![m, p]; let mut i = 0_usize; - loop { - if i == m { - break (); - } - + while i != m { let mut j = 0_usize; - loop { - if j == p { - break (); - } - + while j != p { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } - + while k != n { let mat1_index = i * n + k; let mat2_index = k * p + j; sum += *mat1[mat1_index] * *mat2[mat2_index]; @@ -154,7 +137,7 @@ fn matrix_multiply< i += 1; }; - return TensorTrait::new(result_shape.span(), result_data.span()); + TensorTrait::new(result_shape.span(), result_data.span()) } /// Prepares the shape of a tensor for matrix multiplication. 
@@ -196,7 +179,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp loop { match shape.pop_front() { - Option::Some(item) => { shape_adjusted.append(*item); }, + Option::Some(item) => { shape_adjusted.append(*item) }, Option::None => { break; } }; }; @@ -206,7 +189,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp return shape_adjusted.span(); } - return shape; + shape } /// Adjusts the output shape of the matrix multiplication result based on the @@ -237,5 +220,5 @@ fn adjust_output_shape_after_matmul( let _ = output_shape.pop_back().unwrap(); } - return output_shape; + output_shape } diff --git a/src/operators/tensor/linalg/transpose.cairo b/src/operators/tensor/linalg/transpose.cairo index c7bb96da7..97ad240b4 100644 --- a/src/operators/tensor/linalg/transpose.cairo +++ b/src/operators/tensor/linalg/transpose.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{ new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape }; @@ -24,25 +21,18 @@ fn transpose, impl TCopy: Copy, impl TDrop: D let output_shape = permutation_output_shape(*self.shape, axes); let output_data_len = len_from_shape(output_shape); - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let mut output_index: usize = 0; - loop { - if output_index == output_data_len { - break (); - } - + while output_index != output_data_len { let output_indices = unravel_index(output_index, output_shape); - let mut input_indices = ArrayTrait::new(); + let mut input_indices: Array = array![]; let mut output_axis: usize = 0; - loop { - if output_axis == axes.len() { - break (); - } - + while output_axis != axes.len() { let input_axis = find_axis(axes, output_axis); input_indices.append(*output_indices[input_axis]); + output_axis += 1; }; @@ -52,39 +42,32 @@ fn transpose, impl TCopy: Copy, impl TDrop: D output_index += 1; }; - return TensorTrait::new(output_shape, output_data.span()); + TensorTrait::new(output_shape, output_data.span()) } - fn transpose2D, impl TCopy: Copy, impl TDrop: Drop>( self: @Tensor ) -> Tensor { assert((*self.shape).len() == 2, 'transpose a 2D tensor'); - let mut output_data = ArrayTrait::new(); - let mut output_shape = ArrayTrait::new(); + let mut output_data: Array = array![]; let n = *self.shape[0]; let m = *self.shape[1]; - output_shape.append(m); - output_shape.append(n); + let mut output_shape: Array = array![m, n]; let mut j: usize = 0; - loop { - if j == m { - break (); - } + while j != m { let mut i = 0; - loop { - if i == n { - break (); - } + while i != n { output_data.append(*(*self.data)[i * m + j]); + i += 1; }; + j += 1; }; - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } diff --git a/src/operators/tensor/linalg/trilu.cairo b/src/operators/tensor/linalg/trilu.cairo index 08bfdcc98..1536fcb6a 100644 --- a/src/operators/tensor/linalg/trilu.cairo +++ b/src/operators/tensor/linalg/trilu.cairo @@ -1,6 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -18,78 +15,77 @@ fn trilu< assert((*self.shape).len() >= 2, 'must have at least 2 dimensions'); let shape_len = (*self.shape).len(); - let mut output_data = ArrayTrait::new(); - let mut output_size = ArrayTrait::new(); + let mut output_data: Array = array![]; + let mut output_size: Array = array![]; 
let mut batch_size = 1; let mut n: u32 = 0; let mut m: u32 = 0; - { - let mut self_shape = *self.shape; - let mut i = 0; - loop { - match self_shape.pop_front() { - Option::Some(val) => { - if i == shape_len - 2 { - n = *val; - } else if i == shape_len - 1 { - m = *val; - } else { - batch_size *= *val; - } - i += 1; - output_size.append(*val); - }, - Option::None => { break (); } - } + let mut self_shape = *self.shape; + let mut i = 0; + loop { + match self_shape.pop_front() { + Option::Some(val) => { + if i == shape_len - 2 { + n = *val; + } else if i == shape_len - 1 { + m = *val; + } else { + batch_size *= *val; + } + i += 1; + output_size.append(*val); + }, + Option::None => { break; } + } + }; + + let mut self_data = *self.data; + let mut b = 0; + loop { + if b == batch_size { + break (); } - } - { - let mut self_data = *self.data; - let mut b = 0; + let mut i = 0; loop { - if b == batch_size { + if i == n { break (); } - - let mut i = 0; + let mut j = 0; loop { - if i == n { + if j == m { break (); } - let mut j = 0; - loop { - if j == m { - break (); - } - let ii: felt252 = i.into(); - let jj: felt252 = j.into(); + let ii: felt252 = i.into(); + let jj: felt252 = j.into(); - let iii: i64 = ii.try_into().unwrap(); - let jjj: i64 = jj.try_into().unwrap(); + let iii: i64 = ii.try_into().unwrap(); + let jjj: i64 = jj.try_into().unwrap(); - let result = match self_data.pop_front() { - Option::Some(val) => { - if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) { - *val - } else { - NumberTrait::zero() - } - }, - Option::None => { break (); } - }; - - output_data.append(result); - j += 1; + let result = match self_data.pop_front() { + Option::Some(val) => { + if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) { + *val + } else { + NumberTrait::zero() + } + }, + Option::None => { break; } }; - i += 1; + + output_data.append(result); + + j += 1; }; - b += 1; + + i += 1; }; - } - return TensorTrait::new(*self.shape, output_data.span()); + b += 1; + }; + + TensorTrait::new(*self.shape, output_data.span()) } diff --git a/src/test_helper/tensor/fixed_point/fp16x16.cairo b/src/test_helper/tensor/fixed_point/fp16x16.cairo index 096edb4f8..f82f0972a 100644 --- a/src/test_helper/tensor/fixed_point/fp16x16.cairo +++ b/src/test_helper/tensor/fixed_point/fp16x16.cairo @@ -1,5 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor; @@ -7,381 +5,341 @@ use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn fp_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3]; + 
let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn fp_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, 
true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ 
+ FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn fp_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, 
false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } + fn fp_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); - data.append(FixedTrait::new_unscaled(12, false)); - data.append(FixedTrait::new_unscaled(13, false)); - data.append(FixedTrait::new_unscaled(14, false)); - data.append(FixedTrait::new_unscaled(15, false)); - data.append(FixedTrait::new_unscaled(16, false)); - 
data.append(FixedTrait::new_unscaled(17, false)); - data.append(FixedTrait::new_unscaled(18, false)); - data.append(FixedTrait::new_unscaled(19, false)); - data.append(FixedTrait::new_unscaled(20, false)); - data.append(FixedTrait::new_unscaled(21, false)); - data.append(FixedTrait::new_unscaled(22, false)); - data.append(FixedTrait::new_unscaled(23, false)); - data.append(FixedTrait::new_unscaled(24, false)); - data.append(FixedTrait::new_unscaled(25, false)); - data.append(FixedTrait::new_unscaled(26, false)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false), + FixedTrait::new_unscaled(12, false), + FixedTrait::new_unscaled(13, false), + FixedTrait::new_unscaled(14, false), + FixedTrait::new_unscaled(15, false), + FixedTrait::new_unscaled(16, false), + FixedTrait::new_unscaled(17, false), + FixedTrait::new_unscaled(18, false), + FixedTrait::new_unscaled(19, false), + FixedTrait::new_unscaled(20, false), + FixedTrait::new_unscaled(21, false), + FixedTrait::new_unscaled(22, false), + FixedTrait::new_unscaled(23, false), + FixedTrait::new_unscaled(24, false), + FixedTrait::new_unscaled(25, false), + FixedTrait::new_unscaled(26, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); - data.append(FixedTrait::new_unscaled(12, true)); - data.append(FixedTrait::new_unscaled(13, true)); - data.append(FixedTrait::new_unscaled(14, true)); - data.append(FixedTrait::new_unscaled(15, true)); - data.append(FixedTrait::new_unscaled(16, true)); - data.append(FixedTrait::new_unscaled(17, true)); - data.append(FixedTrait::new_unscaled(18, true)); - data.append(FixedTrait::new_unscaled(19, true)); - data.append(FixedTrait::new_unscaled(20, true)); - data.append(FixedTrait::new_unscaled(21, true)); - data.append(FixedTrait::new_unscaled(22, true)); - data.append(FixedTrait::new_unscaled(23, true)); - data.append(FixedTrait::new_unscaled(24, true)); - data.append(FixedTrait::new_unscaled(25, true)); - data.append(FixedTrait::new_unscaled(26, true)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), 
+ FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true), + FixedTrait::new_unscaled(12, true), + FixedTrait::new_unscaled(13, true), + FixedTrait::new_unscaled(14, true), + FixedTrait::new_unscaled(15, true), + FixedTrait::new_unscaled(16, true), + FixedTrait::new_unscaled(17, true), + FixedTrait::new_unscaled(18, true), + FixedTrait::new_unscaled(19, true), + FixedTrait::new_unscaled(20, true), + FixedTrait::new_unscaled(21, true), + FixedTrait::new_unscaled(22, true), + FixedTrait::new_unscaled(23, true), + FixedTrait::new_unscaled(24, true), + FixedTrait::new_unscaled(25, true), + FixedTrait::new_unscaled(26, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } + diff --git a/src/test_helper/tensor/fixed_point/fp8x23.cairo b/src/test_helper/tensor/fixed_point/fp8x23.cairo index eac13a89b..1746859e5 100644 --- a/src/test_helper/tensor/fixed_point/fp8x23.cairo +++ b/src/test_helper/tensor/fixed_point/fp8x23.cairo @@ -1,5 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::numbers::fixed_point::core::{FixedTrait}; use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor; @@ -7,383 +5,341 @@ use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn fp_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn fp_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - 
data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); + let mut sizes: Array = array![2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); + let mut sizes: Array = array![3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - 
sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![3, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); + let mut sizes: Array = array![3, 1]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); + let mut sizes: Array = array![2, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + 
tensor } // 3D - fn fp_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); + let mut sizes: Array = array![2, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x2x2_neg_helper() -> Tensor { - let mut 
sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); + let mut sizes: Array = array![3, 2, 2]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, false)); - data.append(FixedTrait::new_unscaled(2, false)); - data.append(FixedTrait::new_unscaled(3, false)); - data.append(FixedTrait::new_unscaled(4, false)); - data.append(FixedTrait::new_unscaled(5, false)); - data.append(FixedTrait::new_unscaled(6, false)); - data.append(FixedTrait::new_unscaled(7, false)); - data.append(FixedTrait::new_unscaled(8, false)); - data.append(FixedTrait::new_unscaled(9, false)); - data.append(FixedTrait::new_unscaled(10, false)); - data.append(FixedTrait::new_unscaled(11, false)); - data.append(FixedTrait::new_unscaled(12, false)); - data.append(FixedTrait::new_unscaled(13, false)); - data.append(FixedTrait::new_unscaled(14, false)); - data.append(FixedTrait::new_unscaled(15, false)); - data.append(FixedTrait::new_unscaled(16, false)); - data.append(FixedTrait::new_unscaled(17, false)); - data.append(FixedTrait::new_unscaled(18, false)); - data.append(FixedTrait::new_unscaled(19, false)); - data.append(FixedTrait::new_unscaled(20, false)); - data.append(FixedTrait::new_unscaled(21, false)); - data.append(FixedTrait::new_unscaled(22, false)); - data.append(FixedTrait::new_unscaled(23, false)); - data.append(FixedTrait::new_unscaled(24, false)); - data.append(FixedTrait::new_unscaled(25, false)); - data.append(FixedTrait::new_unscaled(26, false)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, false), + FixedTrait::new_unscaled(2, false), + FixedTrait::new_unscaled(3, false), + FixedTrait::new_unscaled(4, false), + FixedTrait::new_unscaled(5, false), + FixedTrait::new_unscaled(6, false), + FixedTrait::new_unscaled(7, false), + FixedTrait::new_unscaled(8, false), + FixedTrait::new_unscaled(9, false), + FixedTrait::new_unscaled(10, false), + FixedTrait::new_unscaled(11, false), + FixedTrait::new_unscaled(12, false), + FixedTrait::new_unscaled(13, false), + FixedTrait::new_unscaled(14, false), + 
FixedTrait::new_unscaled(15, false), + FixedTrait::new_unscaled(16, false), + FixedTrait::new_unscaled(17, false), + FixedTrait::new_unscaled(18, false), + FixedTrait::new_unscaled(19, false), + FixedTrait::new_unscaled(20, false), + FixedTrait::new_unscaled(21, false), + FixedTrait::new_unscaled(22, false), + FixedTrait::new_unscaled(23, false), + FixedTrait::new_unscaled(24, false), + FixedTrait::new_unscaled(25, false), + FixedTrait::new_unscaled(26, false) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn fp_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(FixedTrait::new_unscaled(0, false)); - data.append(FixedTrait::new_unscaled(1, true)); - data.append(FixedTrait::new_unscaled(2, true)); - data.append(FixedTrait::new_unscaled(3, true)); - data.append(FixedTrait::new_unscaled(4, true)); - data.append(FixedTrait::new_unscaled(5, true)); - data.append(FixedTrait::new_unscaled(6, true)); - data.append(FixedTrait::new_unscaled(7, true)); - data.append(FixedTrait::new_unscaled(8, true)); - data.append(FixedTrait::new_unscaled(9, true)); - data.append(FixedTrait::new_unscaled(10, true)); - data.append(FixedTrait::new_unscaled(11, true)); - data.append(FixedTrait::new_unscaled(12, true)); - data.append(FixedTrait::new_unscaled(13, true)); - data.append(FixedTrait::new_unscaled(14, true)); - data.append(FixedTrait::new_unscaled(15, true)); - data.append(FixedTrait::new_unscaled(16, true)); - data.append(FixedTrait::new_unscaled(17, true)); - data.append(FixedTrait::new_unscaled(18, true)); - data.append(FixedTrait::new_unscaled(19, true)); - data.append(FixedTrait::new_unscaled(20, true)); - data.append(FixedTrait::new_unscaled(21, true)); - data.append(FixedTrait::new_unscaled(22, true)); - data.append(FixedTrait::new_unscaled(23, true)); - data.append(FixedTrait::new_unscaled(24, true)); - data.append(FixedTrait::new_unscaled(25, true)); - data.append(FixedTrait::new_unscaled(26, true)); + let mut sizes: Array = array![3, 3, 3]; + let mut data = array![ + FixedTrait::new_unscaled(0, false), + FixedTrait::new_unscaled(1, true), + FixedTrait::new_unscaled(2, true), + FixedTrait::new_unscaled(3, true), + FixedTrait::new_unscaled(4, true), + FixedTrait::new_unscaled(5, true), + FixedTrait::new_unscaled(6, true), + FixedTrait::new_unscaled(7, true), + FixedTrait::new_unscaled(8, true), + FixedTrait::new_unscaled(9, true), + FixedTrait::new_unscaled(10, true), + FixedTrait::new_unscaled(11, true), + FixedTrait::new_unscaled(12, true), + FixedTrait::new_unscaled(13, true), + FixedTrait::new_unscaled(14, true), + FixedTrait::new_unscaled(15, true), + FixedTrait::new_unscaled(16, true), + FixedTrait::new_unscaled(17, true), + FixedTrait::new_unscaled(18, true), + FixedTrait::new_unscaled(19, true), + FixedTrait::new_unscaled(20, true), + FixedTrait::new_unscaled(21, true), + FixedTrait::new_unscaled(22, true), + FixedTrait::new_unscaled(23, true), + FixedTrait::new_unscaled(24, true), + FixedTrait::new_unscaled(25, true), + FixedTrait::new_unscaled(26, true) + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 89979eef0..feadb88dc 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -1,385 +1,223 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - - 
use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32Tensor; - // 1D fn i32_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } fn i32_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } // 2D - fn i32_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, -1, -2, -3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); - data.append(-8_i32); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data: Array = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); + let mut sizes: Array = 
array![3, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; + let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn i32_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); - data.append(9_i32); - data.append(10_i32); - data.append(11_i32); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::new(sizes.span(), 
data.span()); - return tensor; + tensor } fn i32_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); - data.append(-8_i32); - data.append(-9_i32); - data.append(-10_i32); - data.append(-11_i32); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i32_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(1_i32); - data.append(2_i32); - data.append(3_i32); - data.append(4_i32); - data.append(5_i32); - data.append(6_i32); - data.append(7_i32); - data.append(8_i32); - data.append(9_i32); - data.append(10_i32); - data.append(11_i32); - data.append(12_i32); - data.append(13_i32); - data.append(14_i32); - data.append(15_i32); - data.append(16_i32); - data.append(17_i32); - data.append(18_i32); - data.append(19_i32); - data.append(20_i32); - data.append(21_i32); - data.append(22_i32); - data.append(23_i32); - data.append(24_i32); - data.append(25_i32); - data.append(26_i32); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } fn i32_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i32); - data.append(-1_i32); - data.append(-2_i32); - data.append(-3_i32); - data.append(-4_i32); - data.append(-5_i32); - data.append(-6_i32); - data.append(-7_i32); - data.append(-8_i32); - data.append(-9_i32); - data.append(-10_i32); - data.append(-11_i32); - data.append(-12_i32); - data.append(-13_i32); - data.append(-14_i32); - data.append(-15_i32); - data.append(-16_i32); - data.append(-17_i32); - data.append(-18_i32); - data.append(-19_i32); - data.append(-20_i32); - data.append(-21_i32); - data.append(-22_i32); - data.append(-23_i32); - data.append(-24_i32); - data.append(-25_i32); - data.append(-26_i32); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + -1, + -2, + -3, + -4, + -5, + -6, + -7, + -8, + -9, + -10, + -11, + -12, + -13, + -14, + -15, + -16, + -17, + -18, + -19, + -20, + -21, + -22, + -23, + -24, + -25, + -26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index 6d85e4b3e..a13889c25 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -1,385 +1,224 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; - - use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I8Tensor; - // 1D fn i8_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - 
sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } fn i8_tensor_1x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + + tensor } // 2D - fn i8_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, -1, -2, -3]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data: Array = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; + let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3);
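// Alongside the `array!` rewrite, these hunks drop the trailing `return tensor;`
// in favour of Cairo's implicit return: a block evaluates to its final expression
// when that expression has no trailing semicolon. A minimal illustrative sketch
// (the function name is hypothetical, not taken from this patch):

fn double_sketch(x: u32) -> u32 {
    x * 2 // no `return` and no `;`, so this expression is the function's value
}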
- sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x1_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, -1, -2]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, -1, -2, -3, -4, -5]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D fn i8_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_2x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); - data.append(9_i8); - data.append(10_i8); - data.append(11_i8); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x2x2_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - 
data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); - data.append(-9_i8); - data.append(-10_i8); - data.append(-11_i8); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11]; let tensor = TensorTrait::new(sizes.span(), data.span()); - return tensor; + tensor } fn i8_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(1_i8); - data.append(2_i8); - data.append(3_i8); - data.append(4_i8); - data.append(5_i8); - data.append(6_i8); - data.append(7_i8); - data.append(8_i8); - data.append(9_i8); - data.append(10_i8); - data.append(11_i8); - data.append(12_i8); - data.append(13_i8); - data.append(14_i8); - data.append(15_i8); - data.append(16_i8); - data.append(17_i8); - data.append(18_i8); - data.append(19_i8); - data.append(20_i8); - data.append(21_i8); - data.append(22_i8); - data.append(23_i8); - data.append(24_i8); - data.append(25_i8); - data.append(26_i8); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } fn i8_tensor_3x3x3_neg_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0_i8); - data.append(-1_i8); - data.append(-2_i8); - data.append(-3_i8); - data.append(-4_i8); - data.append(-5_i8); - data.append(-6_i8); - data.append(-7_i8); - data.append(-8_i8); - data.append(-9_i8); - data.append(-10_i8); - data.append(-11_i8); - data.append(-12_i8); - data.append(-13_i8); - data.append(-14_i8); - data.append(-15_i8); - data.append(-16_i8); - data.append(-17_i8); - data.append(-18_i8); - data.append(-19_i8); - data.append(-20_i8); - data.append(-21_i8); - data.append(-22_i8); - data.append(-23_i8); - data.append(-24_i8); - data.append(-25_i8); - data.append(-26_i8); - - let tensor = TensorTrait::new(sizes.span(), data.span()); - - return tensor; + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + -1, + -2, + -3, + -4, + -5, + -6, + -7, + -8, + -9, + -10, + -11, + -12, + -13, + -14, + -15, + -16, + -17, + -18, + -19, + -20, + -21, + -22, + -23, + -24, + -25, + -26 + ]; + + let tensor = TensorTrait::new(sizes.span(), data.span()); + + tensor } diff --git a/src/test_helper/tensor/u32.cairo b/src/test_helper/tensor/u32.cairo index 553ebecff..09ea289ce 100644 --- a/src/test_helper/tensor/u32.cairo +++ b/src/test_helper/tensor/u32.cairo @@ -1,198 +1,115 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; use orion::operators::tensor::U32Tensor; use orion::operators::tensor::{TensorTrait, Tensor}; // 1D fn u32_tensor_1x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); + let mut sizes: Array = array![3]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 2D - fn u32_tensor_2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - 
sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); + let mut sizes: Array = array![2, 2]; + let mut data: Array = array![0, 1, 2, 3]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); + let mut sizes: Array = array![3, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); + let mut sizes: Array = array![3, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x1_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); + let mut sizes: Array = array![3, 1]; + let mut data: Array = array![0, 1, 2]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_2x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); + let mut sizes: Array = array![2, 3]; + let mut data: Array = array![0, 1, 2, 3, 4, 5]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } // 3D - fn u32_tensor_2x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); + let mut sizes: Array = array![2, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x2x2_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(2); - sizes.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); + let mut sizes: Array = array![3, 2, 2]; + let mut data: Array = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } fn u32_tensor_3x3x3_helper() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(3); - sizes.append(3); - sizes.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - 
data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); + let mut sizes: Array = array![3, 3, 3]; + let mut data: Array = array![ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26 + ]; let tensor = TensorTrait::::new(sizes.span(), data.span()); - return tensor; + tensor } diff --git a/src/utils.cairo b/src/utils.cairo index 34946d5c5..e9ec9d31e 100644 --- a/src/utils.cairo +++ b/src/utils.cairo @@ -1,8 +1,3 @@ -use core::traits::TryInto; -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::operators::tensor::{Tensor, TensorTrait}; fn u32_max(a: u32, b: u32) -> u32 { @@ -37,10 +32,7 @@ fn assert_seq_eq, impl TCopy: Copy, impl TDr assert(lhs.len() == rhs.len(), 'should be equal'); let mut i = 0; - loop { - if i >= lhs.len() { - break; - } + while i != lhs.len() { assert_eq(lhs[i], rhs[i]); i += 1; } From 33c3f6141b6c24a0fa4f0db4c384e94a5806f545 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 17 Feb 2024 10:17:51 +0100 Subject: [PATCH 18/40] remove refs in linear classifier traits --- .../ml/linear/linear_classifier.cairo | 4 +- tests/ml/linear_classifier_test.cairo | 24 +- tests/nodes.cairo | 2082 ++++++++--------- 3 files changed, 1055 insertions(+), 1055 deletions(-) diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index b9bed234a..bafe5f8a6 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -132,7 +132,7 @@ trait LinearClassifierTrait { /// [0.036323, 0.090237, 0.87344] /// ]) /// ``` - fn predict(ref self: LinearClassifier, X: Tensor) -> (Span, Tensor); + fn predict(self: LinearClassifier, X: Tensor) -> (Span, Tensor); } impl LinearClassifierImpl< @@ -152,7 +152,7 @@ impl LinearClassifierImpl< +Add>, +NNTrait > of LinearClassifierTrait { - fn predict(ref self: LinearClassifier, X: Tensor) -> (Span, Tensor) { + fn predict(self: LinearClassifier, X: Tensor) -> (Span, Tensor) { let n: usize = self.coefficients.len() / *(X.shape).at(1); let mut shape = ArrayTrait::::new(); shape.append(n); diff --git a/tests/ml/linear_classifier_test.cairo b/tests/ml/linear_classifier_test.cairo index 8dc59afd9..7258533ee 100644 --- a/tests/ml/linear_classifier_test.cairo +++ b/tests/ml/linear_classifier_test.cairo @@ -11,7 +11,7 @@ use core::debug::PrintTrait; fn test_linear_classifier_multi_none() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -37,7 +37,7 @@ fn test_linear_classifier_multi_none() { fn test_linear_classifier_multi_softmax() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -62,7 +62,7 @@ fn test_linear_classifier_multi_softmax() { fn test_linear_classifier_multi_softmax_zero() { let (mut classifier, X) = 
linear_classifier_helper(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -88,7 +88,7 @@ fn test_linear_classifier_multi_softmax_zero() { fn test_linear_classifier_multi_logistic() { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -113,7 +113,7 @@ fn test_linear_classifier_multi_logistic() { fn test_linear_classifier_binary_none() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -132,7 +132,7 @@ fn test_linear_classifier_binary_none() { fn test_linear_classifier_binary_logistic() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -151,7 +151,7 @@ fn test_linear_classifier_binary_logistic() { fn test_linear_classifier_binary_softmax() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); assert(*labels[1] == 1, 'labels[1]'); @@ -169,7 +169,7 @@ fn test_linear_classifier_binary_softmax() { fn test_linear_classifier_binary_softmax_zero() { let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); assert(*labels[1] == 1, 'labels[1]'); @@ -187,7 +187,7 @@ fn test_linear_classifier_binary_softmax_zero() { fn test_linear_classifier_unary_none() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::NONE); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -204,7 +204,7 @@ fn test_linear_classifier_unary_none() { fn test_linear_classifier_unary_logistic() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -221,7 +221,7 @@ fn test_linear_classifier_unary_logistic() { fn test_linear_classifier_unary_softmax() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS 
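// The change running through these tests follows the trait update above: `predict`
// now takes the classifier by value (`self: LinearClassifier`) instead of by
// mutable reference (`ref self`), which Cairo passes in and then implicitly writes
// back to the caller's variable. Call sites therefore drop the `ref` keyword. A
// sketch of the two signatures with generic parameters written out for
// readability; the exact `<T>` bounds are assumed from the impl, not spelled
// out here:
//
//     // Before: the caller must pass `ref classifier` and keep a `mut` binding.
//     fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
//
//     // After: the classifier is moved into the call and consumed by it.
//     fn predict(self: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);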
assert(*labels[0] == 1, 'labels[0]'); @@ -238,7 +238,7 @@ fn test_linear_classifier_unary_softmax() { fn test_linear_classifier_unary_softmax_zero() { let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 8814cfb80..337715889 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1041 +1,1041 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod 
argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod 
leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; -mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod 
gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod 
scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod 
reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod 
reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod 
resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod 
grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// 
mod add_u32; +// mod add_u32_broadcast; +// mod argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod 
concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod 
logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; +// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod reduce_sum_fp16x16_1D; +// mod reduce_sum_fp16x16_2D_default; +// mod reduce_sum_fp16x16_2D_keepdims; +// mod reduce_sum_fp16x16_2D_axis_1; +// mod reduce_sum_fp8x23_1D; +// mod reduce_sum_fp8x23_2D_default; +// mod reduce_sum_fp8x23_2D_keepdims; +// mod reduce_sum_fp8x23_2D_axis_1; +// mod reduce_sum_i32_1D; +// mod reduce_sum_i32_2D_default; +// mod reduce_sum_i32_2D_keepdims; +// mod reduce_sum_i32_2D_axis_1; +// mod reduce_sum_i8_1D; +// mod reduce_sum_i8_2D_default; +// mod reduce_sum_i8_2D_keepdims; +// mod reduce_sum_i8_2D_axis_1; +// mod reduce_sum_u32_1D; +// mod reduce_sum_u32_2D_default; +// mod reduce_sum_u32_2D_keepdims; +// mod reduce_sum_u32_2D_axis_1; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softmax_fp16x16; +// mod softmax_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod less_fp16x16; +// mod less_fp16x16_broadcast; +// mod less_fp8x23; +// mod less_fp8x23_broadcast; +// mod less_i32; +// mod less_i32_broadcast; +// mod less_i8; +// mod less_i8_broadcast; +// mod less_u32; +// mod less_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod slice_fp8x23_3d; +// mod slice_i32_2d; +// mod 
slice_i32_3d; +// mod slice_i8_2d; +// mod slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod gather_fp8x23_3d_default; +// mod gather_fp8x23_3d_axis1; +// mod gather_fp8x23_3d_axis2; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_i8_3d_default; +// mod gather_i8_3d_axis1; +// mod gather_i8_3d_axis2; +// mod gather_i32_3d_default; +// mod gather_i32_3d_axis1; +// mod gather_i32_3d_axis2; +// mod gather_u32_3d_default; +// mod gather_u32_3d_axis1; +// mod gather_u32_3d_axis2; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; +// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; +// mod gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod 
max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; +// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod 
reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; +// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod reduce_prod_u32_2D_axis_1; +// mod gather_elements_fp16x16_3d_default; +// mod gather_elements_fp16x16_3d_axis1; +// mod gather_elements_fp16x16_3d_axis2; +// mod gather_elements_fp8x23_3d_default; +// mod gather_elements_fp8x23_3d_axis1; +// mod gather_elements_fp8x23_3d_axis2; +// mod gather_elements_i8_3d_default; +// mod gather_elements_i8_3d_axis1; +// mod gather_elements_i32_3d_default; +// mod gather_elements_i32_3d_axis1; +// mod gather_elements_i32_3d_axis2; +// mod gather_elements_u32_default; +// mod gather_elements_u32_axis1; +// mod gather_elements_u32_axis2; +// mod gather_elements_u32_axis3; +// mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod 
sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_nan_fp8x23; +// mod is_inf_fp16x16; +// mod 
is_inf_fp8x23; +// mod is_inf_i32; +// mod is_inf_i8; +// mod is_inf_u32; +// mod is_pos_inf_fp16x16; +// mod is_neg_inf_fp16x16; +// mod is_pos_inf_fp8x23; +// mod is_neg_inf_fp8x23; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +// mod is_pos_inf_i8; +// mod is_neg_inf_i8; +// mod reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; +// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; +// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod 
compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; +// mod grid_sample; +// mod grid_sample_cubic; +// mod grid_sample_aligncorners; +// mod grid_sample_nearest; +// mod grid_sample_nearest_aligncorner; +// mod grid_sample_padding_border; +// mod grid_sample_padding_reflection; +// mod grid_sample_padding_zeros; +// mod col2im; +// mod col2im_5D; +// mod col2im_dilations; +// mod col2im_pads; +// mod col2im_strides; +// mod random_uniform_like_fp16x16; +// mod random_uniform_like_fp8x23; +// mod range_fp8x23; +// mod range_fp16x16; +// mod range_i32; +// mod range_i8; +// mod range_u32; +// mod hann_window_fp8x23; +// mod hann_window_fp16x16; +// mod hamming_window_fp16x16; +// mod hamming_window_fp8x23; +// mod blackman_window_fp16x16; +// mod blackman_window_fp8x23; +// mod split_to_sequence_fp16x16_1d_equal_parts; +// mod split_to_sequence_fp16x16_1d_variable_parts; +// mod split_to_sequence_fp16x16_2d_equal_parts; +// mod split_to_sequence_fp16x16_2d_variable_parts; +// mod split_to_sequence_fp16x16_zero_size; +// mod split_to_sequence_fp16x16_1d_uneven; +// mod split_to_sequence_fp16x16_2d_uneven; +// mod split_to_sequence_u32_1d_equal_parts; +// mod split_to_sequence_u32_1d_variable_parts; +// mod split_to_sequence_u32_2d_equal_parts; +// mod split_to_sequence_u32_2d_variable_parts; +// mod split_to_sequence_u32_zero_size; +// mod split_to_sequence_u32_1d_uneven; +// mod split_to_sequence_u32_2d_uneven; +// mod split_to_sequence_2d_scalar; +// mod split_to_sequence_2d_nokeepdims; +// mod split_to_sequence_1d_nokeepdims; +// mod reverse_sequence_fp16x16_batch_equal_parts; +// mod reverse_sequence_fp16x16_time_equal_parts; +// mod reverse_sequence_i32_batch_equal_parts; +// mod reverse_sequence_i32_time_equal_parts; +// mod reverse_sequence_i8_batch_equal_parts; +// mod reverse_sequence_i8_time_equal_parts; +// mod reverse_sequence_u32_4x4_batch; +// mod reverse_sequence_u32_4x4_time; +// mod 
reverse_sequence_u32_3x3_batch; +// mod reverse_sequence_u32_3x3_time; +// mod reverse_sequence_different_dimensions_4_5; +// mod reverse_sequence_different_dimensions_2_4; +// mod reverse_sequence_different_dimensions_1_6; +// mod reverse_sequence_different_dimensions_3x9_batch; +// mod reverse_sequence_different_dimensions_3x9_time; +// mod conv_transpose; +// mod conv_transpose_1d; +// mod conv_transpose_3d; +// mod conv_transpose_attributes; +// mod conv_transpose_autopad_same; +// mod conv_transpose_dilations; +// mod conv_transpose_pads; +// mod conv_transpose_group_2; +// mod conv_transpose_group_2_image_3; +// mod depth_to_space_fp16x16; +// mod depth_to_space_fp8x23; +// mod depth_to_space_i32; +// mod depth_to_space_i8; +// mod depth_to_space_u32; +// mod space_to_depth_fp16x16; +// mod space_to_depth_fp8x23; +// mod space_to_depth_i32; +// mod space_to_depth_i8; +// mod space_to_depth_u32; +// mod scatter_nd_fp16x16_3d_default; +// mod scatter_nd_fp16x16_3d_add; +// mod scatter_nd_fp16x16_3d_mul; +// mod scatter_nd_fp16x16_3d_max; +// mod scatter_nd_fp16x16_3d_min; +// mod scatter_nd_fp8x23_3d_default; +// mod scatter_nd_fp8x23_3d_add; +// mod scatter_nd_fp8x23_3d_mul; +// mod scatter_nd_fp8x23_3d_max; +// mod scatter_nd_fp8x23_3d_min; +// mod scatter_nd_u32_default; +// mod scatter_nd_u32_add; +// mod scatter_nd_u32_mul; +// mod scatter_nd_u32_max; +// mod scatter_nd_u32_min; +// mod conv_2D_with_padding; +// mod conv_1D_no_padding; +// mod conv_1D_with_padding; +// mod conv_3D_no_padding; +// mod conv_3D_with_padding; +// mod conv_4D_no_padding; +// mod conv_2D_with_2_groups; +// mod conv_2D_with_autopad_same; +// mod conv_2D_with_strides_asymmetric_padding; +// mod conv_2D_with_strides_with_padding; +// mod conv_4D_with_padding; From a05f38753844ce55f1d5b94901cf62758b17e7fc Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 17 Feb 2024 10:34:04 +0100 Subject: [PATCH 19/40] move POST_TRANSFORM enum in ml root file --- src/operators/ml.cairo | 12 +++++++++++- src/operators/ml/linear/linear_classifier.cairo | 12 +----------- src/operators/ml/linear/linear_regressor.cairo | 9 +-------- src/operators/ml/svm/svm_classifier.cairo | 12 +----------- src/operators/ml/svm/svm_regressor.cairo | 10 +--------- .../ml/tree_ensemble/tree_ensemble_classifier.cairo | 10 +--------- .../ml/tree_ensemble/tree_ensemble_regressor.cairo | 11 +---------- 7 files changed, 17 insertions(+), 59 deletions(-) diff --git a/src/operators/ml.cairo b/src/operators/ml.cairo index 724664216..2d52e544e 100644 --- a/src/operators/ml.cairo +++ b/src/operators/ml.cairo @@ -6,7 +6,7 @@ use orion::operators::ml::tree_ensemble::core::{ TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES }; use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{ - TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait, POST_TRANSFORM + TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait }; use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{ @@ -20,3 +20,13 @@ use orion::operators::ml::linear::linear_regressor::{ use orion::operators::ml::linear::linear_classifier::{ LinearClassifierTrait, LinearClassifierImpl, LinearClassifier }; + + +#[derive(Copy, Drop)] +enum POST_TRANSFORM { + NONE, + SOFTMAX, + LOGISTIC, + SOFTMAXZERO, + PROBIT, +} diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index bafe5f8a6..6af7ad8ef 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ 
b/src/operators/ml/linear/linear_classifier.cairo @@ -7,7 +7,7 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::nn::{NNTrait, FP16x16NN}; - +use orion::operators::ml::POST_TRANSFORM; #[derive(Destruct)] struct LinearClassifier { @@ -18,16 +18,6 @@ struct LinearClassifier { post_transform: POST_TRANSFORM, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - /// Trait /// /// predict - Performs the linear classification. diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 75e461729..944697f95 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -13,6 +13,7 @@ use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; +use orion::operators::ml::POST_TRANSFORM; #[derive(Destruct)] struct LinearRegressor { @@ -22,14 +23,6 @@ struct LinearRegressor { post_transform: POST_TRANSFORM, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} /// Trait /// diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo index 3b45cc4b7..d89fda31c 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -14,6 +14,7 @@ use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; use orion::utils::get_row; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; +use orion::operators::ml::POST_TRANSFORM; #[derive(Copy, Drop, Destruct)] @@ -30,17 +31,6 @@ struct SVMClassifier { vectors_per_class: Option>, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - - #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, diff --git a/src/operators/ml/svm/svm_regressor.cairo b/src/operators/ml/svm/svm_regressor.cairo index be76931e9..2bc886bda 100644 --- a/src/operators/ml/svm/svm_regressor.cairo +++ b/src/operators/ml/svm/svm_regressor.cairo @@ -10,6 +10,7 @@ use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::utils::get_row; +use orion::operators::ml::POST_TRANSFORM; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; @@ -25,15 +26,6 @@ struct SVMRegressor { support_vectors: Span, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index ab073a5b5..13b8463b4 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -20,6 +20,7 @@ use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::ml::POST_TRANSFORM; use core::debug::PrintTrait; @@ -35,15 +36,6 @@ struct TreeEnsembleClassifier { post_transform: POST_TRANSFORM, } -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} 
- /// Trait /// /// predict - Returns the top class for each of N inputs. diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index 215ad2a96..fc1066b8d 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -21,6 +21,7 @@ use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::ml::POST_TRANSFORM; use core::debug::PrintTrait; @@ -37,16 +38,6 @@ struct TreeEnsembleRegressor { post_transform: POST_TRANSFORM, } - -#[derive(Copy, Drop)] -enum POST_TRANSFORM { - NONE, - SOFTMAX, - LOGISTIC, - SOFTMAXZERO, - PROBIT, -} - #[derive(Copy, Drop)] enum AGGREGATE_FUNCTION { SUM, From 6bcf5760f733bf7d7ebc192e5bde3248fbcf5488 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 17 Feb 2024 12:42:06 +0100 Subject: [PATCH 20/40] remove reference in linear regressor predict --- .../ml/linear/linear_classifier.cairo | 20 ++++++++--------- .../ml/linear/linear_regressor.cairo | 22 +++++++++---------- tests/ml/linear_regressor_test.cairo | 4 ++-- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index 6af7ad8ef..bb5b1bf64 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -25,7 +25,7 @@ trait LinearClassifierTrait { /// # LinearClassifierTrait::predict /// /// ```rust - /// fn predict(ref self: LinearClassifier, X: Tensor) -> Tensor; + /// fn predict(classifier: LinearClassifier, X: Tensor) -> Tensor; /// ``` /// /// Linear Classifier. Performs the linear classification. 
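The hunk above and the ones below make the same API change throughout this file: `predict` is no longer a `ref self` method; the classifier is moved in by value. A minimal sketch of the resulting call site, mirroring the doctest in this diff (the `linear_classifier_helper` name and `POST_TRANSFORM` variant come from that doctest; the generic parameters, stripped in this extract, are restored here as an assumption):

```rust
fn linear_classifier_multi_softmax() -> (Span<usize>, Tensor<FP16x16>) {
    let (classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);

    // The caller no longer writes `ref classifier`, and no longer needs a
    // `mut` binding just to take a reference: the value is moved into predict.
    let (labels, scores) = LinearClassifierTrait::predict(classifier, X);

    (labels, scores)
}
```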
@@ -109,7 +109,7 @@ trait LinearClassifierTrait { /// fn linear_classifier_multi_softmax() -> (Span, Tensor) { /// let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); /// - /// let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + /// let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); /// /// (labels, scores) /// } @@ -122,7 +122,7 @@ trait LinearClassifierTrait { /// [0.036323, 0.090237, 0.87344] /// ]) /// ``` - fn predict(self: LinearClassifier, X: Tensor) -> (Span, Tensor); + fn predict(classifier: LinearClassifier, X: Tensor) -> (Span, Tensor); } impl LinearClassifierImpl< @@ -142,17 +142,17 @@ impl LinearClassifierImpl< +Add>, +NNTrait > of LinearClassifierTrait { - fn predict(self: LinearClassifier, X: Tensor) -> (Span, Tensor) { - let n: usize = self.coefficients.len() / *(X.shape).at(1); + fn predict(classifier: LinearClassifier, X: Tensor) -> (Span, Tensor) { + let n: usize = classifier.coefficients.len() / *(X.shape).at(1); let mut shape = ArrayTrait::::new(); shape.append(n); shape.append(*(X.shape).at(1)); - let mut coefficients = TensorTrait::new(shape.span(), self.coefficients); + let mut coefficients = TensorTrait::new(shape.span(), classifier.coefficients); let coefficients = coefficients.transpose(array![1, 0].span()); let mut scores = X.matmul(@coefficients); - match self.intercepts { + match classifier.intercepts { Option::Some(intercepts) => { let mut shape = ArrayTrait::::new(); shape.append(1); @@ -163,7 +163,7 @@ impl LinearClassifierImpl< Option::None => {}, }; - let (n_classes, classlabels) = match self.classlabels { + let (n_classes, classlabels) = match classifier.classlabels { Option::Some(classlabels) => { (classlabels.len(), classlabels) }, Option::None => { (0, ArrayTrait::::new().span()) }, }; @@ -182,7 +182,7 @@ impl LinearClassifierImpl< scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span()); } // Post Transform - scores = match self.post_transform { + scores = match classifier.post_transform { POST_TRANSFORM::NONE => { scores }, POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, 1) }, POST_TRANSFORM::LOGISTIC => { NNTrait::sigmoid(@scores) }, @@ -202,7 +202,7 @@ impl LinearClassifierImpl< }; } else { let mut i = 0; - match self.post_transform { + match classifier.post_transform { POST_TRANSFORM::NONE => { loop { if i == scores.data.len() { diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 944697f95..ab30a28f1 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -31,14 +31,14 @@ trait LinearRegressorTrait { /// # LinearRegressorTrait::predict /// /// ```rust - /// fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor; + /// fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor; /// ``` /// /// Linear Regressor. Performs the generalized linear regression evaluation. /// /// ## Args /// - /// * `self`: LinearRegressor - A LinearRegressor object. + /// * `regressor`: LinearRegressor - A LinearRegressor object. /// * `X`: Input 2D tensor. 
/// /// ## Returns @@ -98,7 +98,7 @@ trait LinearRegressorTrait { /// post_transform /// }; /// - /// let scores = LinearRegressorTrait::predict(ref regressor, X); + /// let scores = LinearRegressorTrait::predict(regressor, X); /// /// scores /// } @@ -150,7 +150,7 @@ trait LinearRegressorTrait { /// post_transform /// }; /// - /// let scores = LinearRegressorTrait::predict(ref regressor, X); + /// let scores = LinearRegressorTrait::predict(regressor, X); /// /// scores /// } @@ -161,7 +161,7 @@ trait LinearRegressorTrait { /// /// - fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor; + fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor; } impl LinearRegressorImpl< @@ -182,17 +182,17 @@ impl LinearRegressorImpl< +Add>, +NNTrait, > of LinearRegressorTrait { - fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor { - let n: usize = self.coefficients.len() / self.target; + fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor { + let n: usize = regressor.coefficients.len() / regressor.target; let mut shape = ArrayTrait::::new(); - shape.append(self.target); + shape.append(regressor.target); shape.append(n); - let mut coefficients = TensorTrait::new(shape.span(), self.coefficients); + let mut coefficients = TensorTrait::new(shape.span(), regressor.coefficients); let coefficients = coefficients.transpose(array![1, 0].span()); let mut score = X.matmul(@coefficients); - match self.intercepts { + match regressor.intercepts { Option::Some(intercepts) => { let mut shape = ArrayTrait::::new(); shape.append(1); @@ -204,7 +204,7 @@ impl LinearRegressorImpl< }; // Post Transform - let score = match self.post_transform { + let score = match regressor.post_transform { POST_TRANSFORM::NONE => score, // No action required POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1), POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score), diff --git a/tests/ml/linear_regressor_test.cairo b/tests/ml/linear_regressor_test.cairo index 1aa7a4211..141ea40ce 100644 --- a/tests/ml/linear_regressor_test.cairo +++ b/tests/ml/linear_regressor_test.cairo @@ -40,7 +40,7 @@ fn test_linear_regressor() { coefficients, intercepts, target, post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27'); assert(*scores.data[1] == FP16x16 { mag: 79299, sign: true }, '*scores[1] == -1.21'); @@ -84,7 +84,7 @@ fn test_linear_regressor_2() { coefficients, intercepts, target, post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27'); assert(*scores.data[1] == FP16x16 { mag: 4588, sign: true }, '*scores[1] == -0.07'); From ebf07ba9b66355169fc665fd3aefab12c8ea4e9d Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 17 Feb 2024 17:02:25 +0100 Subject: [PATCH 21/40] refactor tree ensemble --- .../tree_ensemble_classifier.cairo | 47 ++++++++++--------- .../tree_ensemble_regressor.cairo | 41 ++++++++-------- tests/ml/tree_ensemble_classifier.cairo | 18 +++---- tests/ml/tree_ensemble_regressor.cairo | 8 ++-- 4 files changed, 58 insertions(+), 56 deletions(-) diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index 13b8463b4..c9edfd51c 100644 --- 
a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -43,7 +43,7 @@ trait TreeEnsembleClassifierTrait { /// # TreeEnsembleClassifier::predict /// /// ```rust - /// fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + /// fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); /// ``` /// /// Tree Ensemble classifier. Returns the top class for each of N inputs. @@ -227,7 +227,7 @@ trait TreeEnsembleClassifierTrait { /// fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span, MutMatrix::) { /// let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); /// - /// let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + /// let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X); /// (labels, scores) /// } /// @@ -240,7 +240,7 @@ trait TreeEnsembleClassifierTrait { /// ]) /// ``` /// - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); } impl TreeEnsembleClassifierImpl< @@ -259,14 +259,15 @@ impl TreeEnsembleClassifierImpl< +Div, +Mul > of TreeEnsembleClassifierTrait { - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::) { - let leaves_index = self.ensemble.leave_index_tree(X); - let n_classes = self.classlabels.len(); + fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::) { + let mut classifier = classifier; + let leaves_index = classifier.ensemble.leave_index_tree(X); + let n_classes = classifier.classlabels.len(); let mut res: MutMatrix = MutMatrixImpl::new(*leaves_index.shape.at(0), n_classes); // Set base values - if self.base_values.is_some() { - let mut base_values = self.base_values.unwrap(); + if classifier.base_values.is_some() { + let mut base_values = classifier.base_values.unwrap(); let mut row: usize = 0; loop { if row == res.rows { @@ -312,12 +313,12 @@ impl TreeEnsembleClassifierImpl< let mut class_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; loop { - if i == self.class_treeids.len() { + if i == classifier.class_treeids.len() { break; } - let tid = *self.class_treeids[i]; - let nid = *self.class_nodeids[i]; + let tid = *classifier.class_treeids[i]; + let nid = *classifier.class_nodeids[i]; let mut key = PedersenHasherImpl::new(); let key: felt252 = key.hash(tid.into(), nid.into()); @@ -348,8 +349,8 @@ impl TreeEnsembleClassifierImpl< let mut key = PedersenHasherImpl::new(); let key: felt252 = key .hash( - (*self.ensemble.atts.nodes_treeids[*index]).into(), - (*self.ensemble.atts.nodes_nodeids[*index]).into() + (*classifier.ensemble.atts.nodes_treeids[*index]).into(), + (*classifier.ensemble.atts.nodes_nodeids[*index]).into() ); t_index.append(class_index.get(key).deref()); }, @@ -364,21 +365,21 @@ impl TreeEnsembleClassifierImpl< loop { match its.pop_front() { Option::Some(it) => { - match res.get(i, *self.class_ids[*it]) { + match res.get(i, *classifier.class_ids[*it]) { Option::Some(val) => { res .set( i, - *self.class_ids[*it], - val + *self.class_weights[*it] + *classifier.class_ids[*it], + val + *classifier.class_weights[*it] ); }, Option::None => { res .set( i, - *self.class_ids[*it], - *self.class_weights[*it] + *classifier.class_ids[*it], + *classifier.class_weights[*it] ); }, }; @@ -396,7 +397,7 @@ impl TreeEnsembleClassifierImpl< // Binary class let mut binary = false; let 
mut i: usize = 0; - let mut class_ids = self.class_ids; + let mut class_ids = classifier.class_ids; let mut class_id: usize = 0; // Get first class_id in class_ids match class_ids.pop_front() { @@ -404,7 +405,7 @@ impl TreeEnsembleClassifierImpl< Option::None => { class_id = 0; } }; loop { - if i == self.class_ids.len() { + if i == classifier.class_ids.len() { break; } match class_ids.pop_front() { @@ -436,7 +437,7 @@ impl TreeEnsembleClassifierImpl< }; i += 1; }; - match self.post_transform { + match classifier.post_transform { POST_TRANSFORM::NONE => { let mut i: usize = 0; loop { @@ -518,7 +519,7 @@ impl TreeEnsembleClassifierImpl< } // Post Transform - let mut new_scores = match self.post_transform { + let mut new_scores = match classifier.post_transform { POST_TRANSFORM::NONE => res, // No action required POST_TRANSFORM::SOFTMAX => res.softmax(1), POST_TRANSFORM::LOGISTIC => res.sigmoid(), @@ -532,7 +533,7 @@ impl TreeEnsembleClassifierImpl< let mut labels_list = ArrayTrait::new(); loop { match labels.pop_front() { - Option::Some(i) => { labels_list.append(*self.classlabels[*i]); }, + Option::Some(i) => { labels_list.append(*classifier.classlabels[*i]); }, Option::None => { break; } }; }; diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index fc1066b8d..136af9aa0 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -53,7 +53,7 @@ trait TreeEnsembleRegressorTrait { /// # TreeEnsembleRegressor::predict /// /// ```rust - /// fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); + /// fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); /// ``` /// /// Tree Ensemble regressor. Returns the regressed values for each input in N. 
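The regressor receives the same refactor as the classifier above: `predict` takes the ensemble by value instead of `ref self`, and because the body still mutates internal state it rebinds the parameter mutably on its first line (`let mut regressor = regressor;`, visible in the hunks below). A minimal self-contained sketch of that move-then-rebind pattern in Cairo; the function and values here are hypothetical, not from this codebase:

```rust
// Take ownership of `values`, then rebind it mutably inside the body.
// This is the same pattern the patch applies to `predict`, letting the
// mutation-heavy body compile without a `ref` parameter in the signature.
fn consume_and_mutate(values: Array<u32>) -> Array<u32> {
    let mut values = values; // local mutable rebinding of the moved-in value
    values.append(42);
    values
}
```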
@@ -212,7 +212,7 @@ trait TreeEnsembleRegressorTrait { /// /// fn test_tree_ensemble_regressor_SUM() -> MutMatrix:: { /// let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM); - /// let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); + /// let mut res = TreeEnsembleRegressorTrait::predict(regressor, X); /// res /// } /// >>> @@ -221,7 +221,7 @@ trait TreeEnsembleRegressorTrait { /// /// ``` /// - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> MutMatrix::; + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> MutMatrix::; } impl TreeEnsembleRegressorImpl< @@ -240,22 +240,23 @@ impl TreeEnsembleRegressorImpl< +Div, +Mul, > of TreeEnsembleRegressorTrait { - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> MutMatrix:: { - let leaves_index = self.ensemble.leave_index_tree(X); - let n_targets = self.n_targets; + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> MutMatrix:: { + let mut regressor = regressor; + let leaves_index = regressor.ensemble.leave_index_tree(X); + let n_targets = regressor.n_targets; let mut res: MutMatrix = MutMatrixImpl::new(*leaves_index.shape.at(0), n_targets); - let n_trees = self.ensemble.tree_ids.len(); + let n_trees = regressor.ensemble.tree_ids.len(); let mut target_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; loop { - if i == self.target_treeids.len() { + if i == regressor.target_treeids.len() { break; } - let tid = *self.target_treeids[i]; - let nid = *self.target_nodeids[i]; + let tid = *regressor.target_treeids[i]; + let nid = *regressor.target_nodeids[i]; let mut key = PedersenHasherImpl::new(); let key: felt252 = key.hash(tid.into(), nid.into()); @@ -287,8 +288,8 @@ impl TreeEnsembleRegressorImpl< let mut key = PedersenHasherImpl::new(); let key: felt252 = key .hash( - (*self.ensemble.atts.nodes_treeids[*index]).into(), - (*self.ensemble.atts.nodes_nodeids[*index]).into() + (*regressor.ensemble.atts.nodes_treeids[*index]).into(), + (*regressor.ensemble.atts.nodes_nodeids[*index]).into() ); t_index.append(target_index.get(key).deref()); }, @@ -297,20 +298,20 @@ impl TreeEnsembleRegressorImpl< }; let mut t_index = t_index.span(); - match self.aggregate_function { - AGGREGATE_FUNCTION::SUM => { compute_res_SUM(ref self, ref res, ref t_index, i); }, + match regressor.aggregate_function { + AGGREGATE_FUNCTION::SUM => { compute_res_SUM(ref regressor, ref res, ref t_index, i); }, AGGREGATE_FUNCTION::AVERAGE => { - compute_res_AVERAGE(ref self, ref res, ref t_index, n_trees, i); + compute_res_AVERAGE(ref regressor, ref res, ref t_index, n_trees, i); }, - AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref self, ref res, ref t_index, i); }, - AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref self, ref res, ref t_index, i); }, + AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref regressor, ref res, ref t_index, i); }, + AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref regressor, ref res, ref t_index, i); }, }; i += 1; }; // Convention is to add base_values after aggregate function - if self.base_values.is_some() { - let mut base_values = self.base_values.unwrap(); + if regressor.base_values.is_some() { + let mut base_values = regressor.base_values.unwrap(); let mut row: usize = 0; loop { if row == res.rows { @@ -337,7 +338,7 @@ impl TreeEnsembleRegressorImpl< } // Post Transform - let mut new_scores = match self.post_transform { + let mut new_scores = match regressor.post_transform { POST_TRANSFORM::NONE => res, // No action required POST_TRANSFORM::SOFTMAX => res.softmax(1), 
POST_TRANSFORM::LOGISTIC => res.sigmoid(), diff --git a/tests/ml/tree_ensemble_classifier.cairo b/tests/ml/tree_ensemble_classifier.cairo index 441aabb34..eb3c7ef67 100644 --- a/tests/ml/tree_ensemble_classifier.cairo +++ b/tests/ml/tree_ensemble_classifier.cairo @@ -12,7 +12,7 @@ use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; fn test_tree_ensemble_classifier_multi_pt_none() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -64,7 +64,7 @@ fn test_tree_ensemble_classifier_multi_pt_none() { fn test_tree_ensemble_classifier_multi_pt_softmax() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0]'); @@ -116,7 +116,7 @@ fn test_tree_ensemble_classifier_multi_pt_softmax() { fn test_tree_ensemble_classifier_multi_pt_softmax_zero() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAXZERO); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -169,7 +169,7 @@ fn test_tree_ensemble_classifier_multi_pt_softmax_zero() { fn test_tree_ensemble_classifier_multi_pt_logistic() { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::LOGISTIC); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 0, 'labels[0] == 0'); @@ -221,7 +221,7 @@ fn test_tree_ensemble_classifier_multi_pt_logistic() { fn test_tree_ensemble_classifier_binary_none() { let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::NONE); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -245,7 +245,7 @@ fn test_tree_ensemble_classifier_binary_logistic() { POST_TRANSFORM::LOGISTIC ); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -267,7 +267,7 @@ fn test_tree_ensemble_classifier_binary_logistic() { fn test_tree_ensemble_classifier_binary_softmax() { let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 'labels[0]'); @@ -291,7 +291,7 @@ fn test_tree_ensemble_classifier_binary_softmax_zero() { POST_TRANSFORM::SOFTMAXZERO ); - let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X); // ASSERT LABELS assert(*labels[0] == 1, 
@@ -313,7 +313,7 @@ fn test_tree_ensemble_classifier_binary_softmax_zero() {
 // fn test_tree_ensemble_classifier_binary_probit() {
 //     let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::PROBIT);
 
-//     let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);
+//     let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
 
 //     // ASSERT LABELS
 //     assert(*labels[0] == 1, 'labels[0]');
diff --git a/tests/ml/tree_ensemble_regressor.cairo b/tests/ml/tree_ensemble_regressor.cairo
index 5b1aeeb41..70452cb42 100644
--- a/tests/ml/tree_ensemble_regressor.cairo
+++ b/tests/ml/tree_ensemble_regressor.cairo
@@ -13,7 +13,7 @@ use core::debug::PrintTrait;
 fn test_tree_ensemble_regressor_SUM() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -35,7 +35,7 @@ fn test_tree_ensemble_regressor_SUM() {
 fn test_tree_ensemble_regressor_AVERAGE() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::AVERAGE);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -57,7 +57,7 @@ fn test_tree_ensemble_regressor_AVERAGE() {
 fn test_tree_ensemble_regressor_MIN() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MIN);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(
@@ -79,7 +79,7 @@ fn test_tree_ensemble_regressor_MIN() {
 fn test_tree_ensemble_regressor_MAX() {
     let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MAX);
 
-    let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X);
+    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);
 
     // ASSERT RES
     assert(

From 3eab6523a324fee07ce8e832036f4b2d1c9bc039 Mon Sep 17 00:00:00 2001
From: TAdev0
Date: Mon, 19 Feb 2024 18:04:00 +0100
Subject: [PATCH 22/40] final_refactor_loops_and_improve_gas_consumption

---
 .../implementations/fp16x16/core.cairo | 132 ++--
 .../implementations/fp16x16/math/comp.cairo | 36 +-
 .../implementations/fp16x16/math/core.cairo | 148 ++--
 .../implementations/fp16x16/math/erf.cairo | 2 +-
 .../implementations/fp16x16/math/hyp.cairo | 23 +-
 .../implementations/fp16x16/math/lut.cairo | 11 +-
 .../implementations/fp16x16/math/trig.cairo | 51 +-
 .../implementations/fp16x16wide/core.cairo | 138 ++--
 .../implementations/fp16x16wide/helpers.cairo | 1 -
 .../fp16x16wide/math/comp.cairo | 36 +-
 .../fp16x16wide/math/core.cairo | 148 ++--
 .../fp16x16wide/math/erf.cairo | 3 +-
 .../fp16x16wide/math/hyp.cairo | 23 +-
 .../fp16x16wide/math/lut.cairo | 11 +-
 .../fp16x16wide/math/trig.cairo | 51 +-
 .../implementations/fp32x32/comp.cairo | 25 +-
 .../implementations/fp32x32/core.cairo | 127 ++--
 .../implementations/fp32x32/erf.cairo | 4 +-
 .../implementations/fp32x32/lut.cairo | 3 +-
 .../implementations/fp64x64/comp.cairo | 22 +-
 .../implementations/fp64x64/core.cairo | 125 ++--
 .../implementations/fp64x64/erf.cairo | 4 +-
 .../implementations/fp64x64/lut.cairo | 3 +-
 .../implementations/fp8x23/math/comp.cairo | 28 +-
 .../implementations/fp8x23/math/core.cairo | 53 +-
 .../implementations/fp8x23/math/trig.cairo | 10 +-
 .../fp8x23wide/math/comp.cairo | 4 +-
src/numbers/fixed_point/utils.cairo | 12 +- .../ml/linear/linear_classifier.cairo | 48 +- .../ml/linear/linear_regressor.cairo | 20 +- src/operators/ml/svm/core.cairo | 26 +- src/operators/ml/svm/svm_classifier.cairo | 303 +++----- src/operators/ml/svm/svm_regressor.cairo | 31 +- src/operators/ml/tree_ensemble/core.cairo | 21 +- .../tree_ensemble_classifier.cairo | 110 +-- .../tree_ensemble_regressor.cairo | 66 +- src/operators/nn/functional/col2im.cairo | 131 ++-- src/operators/nn/functional/conv.cairo | 661 ++++++------------ .../nn/functional/conv_transpose.cairo | 315 +++------ .../nn/functional/depth_to_space.cairo | 21 +- src/operators/nn/functional/gemm.cairo | 9 +- src/operators/nn/functional/grid_sample.cairo | 267 +++---- .../nn/functional/hard_sigmoid.cairo | 12 +- src/operators/nn/functional/leaky_relu.cairo | 12 +- src/operators/nn/functional/linear.cairo | 4 +- src/operators/nn/functional/logsoftmax.cairo | 7 +- src/operators/nn/functional/relu.cairo | 8 +- src/operators/nn/functional/sigmoid.cairo | 12 +- src/operators/nn/functional/softmax.cairo | 4 +- .../nn/functional/softmax_zero.cairo | 39 +- src/operators/nn/functional/softplus.cairo | 12 +- src/operators/nn/functional/softsign.cairo | 12 +- .../nn/functional/space_to_depth.cairo | 14 +- .../nn/functional/thresholded_relu.cairo | 8 +- .../nn/implementations/nn_fp16x16.cairo | 14 +- .../nn/implementations/nn_fp32x32.cairo | 14 +- .../nn/implementations/nn_fp64x64.cairo | 14 +- .../nn/implementations/nn_fp8x23.cairo | 10 +- src/operators/nn/implementations/nn_i32.cairo | 10 +- src/operators/nn/implementations/nn_i8.cairo | 10 +- src/operators/nn/implementations/nn_u32.cairo | 10 +- .../functional/concat_from_sequence.cairo | 28 +- .../sequence/functional/sequence_at.cairo | 14 +- .../functional/sequence_construct.cairo | 5 +- .../sequence/functional/sequence_empty.cairo | 9 +- .../sequence/functional/sequence_erase.cairo | 20 +- .../sequence/functional/sequence_insert.cairo | 20 +- .../sequence/functional/sequence_length.cairo | 7 +- .../implementations/sequence_bool.cairo | 2 - .../implementations/sequence_fp16x16.cairo | 3 - .../sequence_fp16x16wide.cairo | 3 - .../implementations/sequence_fp32x32.cairo | 3 - .../implementations/sequence_fp64x64.cairo | 3 - .../implementations/sequence_fp8x23.cairo | 3 - .../implementations/sequence_fp8x23wide.cairo | 3 - .../implementations/sequence_i32.cairo | 3 - .../implementations/sequence_i8.cairo | 3 - .../implementations/sequence_u32.cairo | 3 - src/operators/tensor/core.cairo | 22 +- src/operators/tensor/helpers.cairo | 88 +-- .../tensor/implementations/tensor_i8.cairo | 34 +- .../tensor/manipulation/optional.cairo | 8 +- .../manipulation/reverse_sequence.cairo | 106 ++- src/operators/tensor/manipulation/split.cairo | 57 +- .../manipulation/split_to_sequence.cairo | 102 +-- .../tensor/manipulation/unique.cairo | 22 +- src/operators/tensor/math/abs.cairo | 8 +- src/operators/tensor/math/acos.cairo | 8 +- src/operators/tensor/math/acosh.cairo | 9 +- src/operators/tensor/math/and.cairo | 13 +- src/operators/tensor/math/argmax.cairo | 16 +- src/operators/tensor/math/argmin.cairo | 16 +- src/operators/tensor/math/arithmetic.cairo | 106 +-- src/operators/tensor/math/asin.cairo | 8 +- src/operators/tensor/math/asinh.cairo | 10 +- src/operators/tensor/math/atan.cairo | 10 +- src/operators/tensor/math/binarizer.cairo | 8 +- src/operators/tensor/math/bitwise_and.cairo | 14 +- src/operators/tensor/math/bitwise_or.cairo | 14 +- src/operators/tensor/math/bitwise_xor.cairo | 14 +- 
.../tensor/math/blackman_window.cairo | 61 +- src/operators/tensor/math/ceil.cairo | 8 +- src/operators/tensor/math/compress.cairo | 18 +- src/operators/tensor/math/concat.cairo | 23 +- src/operators/tensor/math/cos.cairo | 10 +- src/operators/tensor/math/cosh.cairo | 10 +- src/operators/tensor/math/cumsum.cairo | 31 +- src/operators/tensor/math/equal.cairo | 13 +- src/operators/tensor/math/erf.cairo | 11 +- src/operators/tensor/math/exp.cairo | 14 +- src/operators/tensor/math/flatten.cairo | 6 +- src/operators/tensor/math/gather.cairo | 34 +- .../tensor/math/gather_elements.cairo | 19 +- src/operators/tensor/math/gather_nd.cairo | 36 +- src/operators/tensor/math/greater.cairo | 13 +- src/operators/tensor/math/greater_equal.cairo | 13 +- .../tensor/math/hamming_window.cairo | 38 +- src/operators/tensor/math/hann_window.cairo | 38 +- src/operators/tensor/math/is_inf.cairo | 16 +- src/operators/tensor/math/is_nan.cairo | 8 +- .../tensor/math/layer_normalization.cairo | 68 +- src/operators/tensor/math/less.cairo | 13 +- src/operators/tensor/math/less_equal.cairo | 13 +- src/operators/tensor/math/log.cairo | 10 +- src/operators/tensor/math/max.cairo | 17 +- src/operators/tensor/math/max_in_tensor.cairo | 5 +- src/operators/tensor/math/min.cairo | 17 +- src/operators/tensor/math/min_in_tensor.cairo | 5 +- src/operators/tensor/math/neg.cairo | 10 +- src/operators/tensor/math/not.cairo | 9 +- src/operators/tensor/math/onehot.cairo | 30 +- .../tensor/math/optional_get_element.cairo | 10 +- src/operators/tensor/math/or.cairo | 13 +- src/operators/tensor/math/pow.cairo | 13 +- .../tensor/math/random_uniform_like.cairo | 50 +- src/operators/tensor/math/range.cairo | 27 +- src/operators/tensor/math/reduce_l1.cairo | 11 +- src/operators/tensor/math/reduce_l2.cairo | 17 +- .../tensor/math/reduce_log_sum.cairo | 9 +- src/operators/tensor/math/reduce_mean.cairo | 44 +- src/operators/tensor/math/reduce_min.cairo | 44 +- src/operators/tensor/math/reduce_prod.cairo | 19 +- src/operators/tensor/math/reduce_sum.cairo | 19 +- .../tensor/math/reduce_sum_square.cairo | 14 +- src/operators/tensor/math/resize.cairo | 403 +++++------ src/operators/tensor/math/round.cairo | 8 +- src/operators/tensor/math/scatter.cairo | 35 +- src/operators/tensor/math/scatter_nd.cairo | 90 +-- src/operators/tensor/math/shrink.cairo | 8 +- src/operators/tensor/math/sign.cairo | 10 +- src/operators/tensor/math/sin.cairo | 10 +- src/operators/tensor/math/sinh.cairo | 11 +- src/operators/tensor/math/sqrt.cairo | 10 +- src/operators/tensor/math/tanh.cairo | 10 +- src/operators/tensor/math/where.cairo | 13 +- src/operators/tensor/math/xor.cairo | 13 +- .../tensor/ml/array_feature_extractor.cairo | 24 +- .../quantization/dequantize_linear.cairo | 11 +- .../dynamic_quantize_linear.cairo | 33 +- .../tensor/quantization/qlinear_add.cairo | 7 +- .../tensor/quantization/qlinear_concat.cairo | 12 +- .../quantization/qlinear_leakyrelu.cairo | 10 +- .../tensor/quantization/qlinear_matmul.cairo | 60 +- .../tensor/quantization/qlinear_mul.cairo | 6 +- .../tensor/quantization/quantize_linear.cairo | 11 +- tests/nodes/random_uniform_like_fp16x16.cairo | 7 +- tests/nodes/random_uniform_like_fp8x23.cairo | 7 +- tests/nodes/range_fp16x16.cairo | 6 +- tests/nodes/range_fp8x23.cairo | 6 +- tests/nodes/range_i32.cairo | 2 +- tests/nodes/range_i8.cairo | 2 +- tests/nodes/range_u32.cairo | 2 +- ...se_sequence_different_dimensions_1_6.cairo | 7 +- ...se_sequence_different_dimensions_2_4.cairo | 7 +- ...uence_different_dimensions_3x9_batch.cairo | 7 +- 
...quence_different_dimensions_3x9_time.cairo | 7 +- ...se_sequence_different_dimensions_4_5.cairo | 7 +- ...equence_fp16x16_2d_batch_equal_parts.cairo | 7 +- ...sequence_fp16x16_2d_time_equal_parts.cairo | 7 +- ...e_sequence_fp16x16_batch_equal_parts.cairo | 7 +- ...se_sequence_fp16x16_time_equal_parts.cairo | 7 +- ...se_sequence_i32_2d_batch_equal_parts.cairo | 7 +- ...rse_sequence_i32_2d_time_equal_parts.cairo | 7 +- ...verse_sequence_i32_batch_equal_parts.cairo | 7 +- ...everse_sequence_i32_time_equal_parts.cairo | 7 +- ...rse_sequence_i8_2d_batch_equal_parts.cairo | 7 +- ...erse_sequence_i8_2d_time_equal_parts.cairo | 7 +- ...everse_sequence_i8_batch_equal_parts.cairo | 7 +- ...reverse_sequence_i8_time_equal_parts.cairo | 7 +- .../reverse_sequence_time_equal_parts.cairo | 7 +- ...se_sequence_u32_2d_batch_equal_parts.cairo | 7 +- ...rse_sequence_u32_2d_time_equal_parts.cairo | 7 +- .../reverse_sequence_u32_3x3_batch.cairo | 7 +- .../nodes/reverse_sequence_u32_3x3_time.cairo | 7 +- .../reverse_sequence_u32_4x4_batch.cairo | 7 +- .../nodes/reverse_sequence_u32_4x4_time.cairo | 7 +- .../reverse_sequence_u32_zero_size.cairo | 7 +- tests/nodes/scatter_nd_fp16x16_3d_add.cairo | 3 +- .../nodes/scatter_nd_fp16x16_3d_default.cairo | 2 +- tests/nodes/scatter_nd_fp16x16_3d_max.cairo | 3 +- tests/nodes/scatter_nd_fp16x16_3d_min.cairo | 3 +- tests/nodes/scatter_nd_fp16x16_3d_mul.cairo | 3 +- tests/nodes/scatter_nd_fp8x23_3d_add.cairo | 3 +- .../nodes/scatter_nd_fp8x23_3d_default.cairo | 2 +- tests/nodes/scatter_nd_fp8x23_3d_max.cairo | 3 +- tests/nodes/scatter_nd_fp8x23_3d_min.cairo | 3 +- tests/nodes/scatter_nd_fp8x23_3d_mul.cairo | 3 +- tests/nodes/scatter_nd_u32_add.cairo | 3 +- tests/nodes/scatter_nd_u32_default.cairo | 2 +- tests/nodes/scatter_nd_u32_max.cairo | 3 +- tests/nodes/scatter_nd_u32_min.cairo | 3 +- tests/nodes/scatter_nd_u32_mul.cairo | 3 +- ...t_to_sequence_fp16x16_1d_equal_parts.cairo | 7 +- .../split_to_sequence_fp16x16_1d_uneven.cairo | 7 +- ...o_sequence_fp16x16_1d_variable_parts.cairo | 9 +- ...t_to_sequence_fp16x16_2d_equal_parts.cairo | 7 +- .../split_to_sequence_fp16x16_2d_uneven.cairo | 7 +- ...o_sequence_fp16x16_2d_variable_parts.cairo | 9 +- .../split_to_sequence_fp16x16_zero_size.cairo | 9 +- ...split_to_sequence_u32_1d_equal_parts.cairo | 7 +- .../split_to_sequence_u32_1d_uneven.cairo | 7 +- ...it_to_sequence_u32_1d_variable_parts.cairo | 9 +- ...split_to_sequence_u32_2d_equal_parts.cairo | 7 +- .../split_to_sequence_u32_2d_uneven.cairo | 7 +- ...it_to_sequence_u32_2d_variable_parts.cairo | 9 +- .../split_to_sequence_u32_zero_size.cairo | 9 +- .../optional/optional_get_element_test.cairo | 14 +- .../optional/optional_has_element_test.cairo | 14 +- tests/operators/optional/optional_test.cairo | 94 ++- tests/performance.cairo | 2 +- .../dynamic_quantize_linear_test.cairo | 2 +- .../dynamic_quantize_linear_fp_test.cairo | 22 +- 232 files changed, 2621 insertions(+), 3902 deletions(-) diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index 421339dae..8f77324aa 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -21,169 +21,170 @@ const MAX: u32 = 2147483648; // 2 ** 31 impl FP16x16Impl of FixedTrait { fn ZERO() -> FP16x16 { - return FP16x16 { mag: 0, sign: false }; + FP16x16 { mag: 0, sign: false } } fn HALF() -> FP16x16 { - return FP16x16 { mag: HALF, sign: false }; + FP16x16 { mag: HALF, sign: false } 
} fn ONE() -> FP16x16 { - return FP16x16 { mag: ONE, sign: false }; + FP16x16 { mag: ONE, sign: false } } fn MAX() -> FP16x16 { - return FP16x16 { mag: MAX, sign: false }; + FP16x16 { mag: MAX, sign: false } } fn new(mag: u32, sign: bool) -> FP16x16 { - return FP16x16 { mag: mag, sign: sign }; + FP16x16 { mag: mag, sign: sign } } fn new_unscaled(mag: u32, sign: bool) -> FP16x16 { - return FP16x16 { mag: mag * ONE, sign: sign }; + FP16x16 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP16x16 { let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP16x16) -> FP16x16 { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP16x16) -> FP16x16 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP16x16) -> FP16x16 { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP16x16) -> FP16x16 { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP16x16) -> FP16x16 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP16x16) -> FP16x16 { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP16x16) -> FP16x16 { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP16x16) -> FP16x16 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP16x16) -> FP16x16 { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP16x16) -> FP16x16 { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP16x16) -> FP16x16 { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP16x16) -> FP16x16 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP16x16) -> FP16x16 { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP16x16) -> FP16x16 { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP16x16) -> FP16x16 { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP16x16) -> FP16x16 { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP16x16) -> FP16x16 { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP16x16) -> FP16x16 { - return core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP16x16) -> FP16x16 { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP16x16) -> FP16x16 { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP16x16, b: FP16x16) -> FP16x16 { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP16x16) -> FP16x16 { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP16x16) -> FP16x16 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP16x16) -> FP16x16 { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP16x16) -> FP16x16 { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be 
positive fn sqrt(self: FP16x16) -> FP16x16 { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP16x16) -> FP16x16 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP16x16) -> FP16x16 { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP16x16) -> FP16x16 { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP16x16) -> FP16x16 { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP16x16 { - return FP16x16 { mag: 0, sign: true }; + FP16x16 { mag: 0, sign: true } } fn is_nan(self: FP16x16) -> bool { @@ -191,15 +192,15 @@ impl FP16x16Impl of FixedTrait { } fn INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: false }; + FP16x16 { mag: 4294967295, sign: false } } fn POS_INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: false }; + FP16x16 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP16x16 { - return FP16x16 { mag: 4294967295, sign: true }; + FP16x16 { mag: 4294967295, sign: true } } fn is_inf(self: FP16x16) -> bool { @@ -215,7 +216,7 @@ impl FP16x16Impl of FixedTrait { } fn erf(self: FP16x16) -> FP16x16 { - return erf::erf(self); + erf::erf(self) } } @@ -233,9 +234,9 @@ impl FP16x16IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -256,10 +257,10 @@ impl FP16x16TryIntoI8 of TryInto { impl FP16x16TryIntoU128 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -267,10 +268,10 @@ impl FP16x16TryIntoU128 of TryInto { impl FP16x16TryIntoU64 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -278,10 +279,10 @@ impl FP16x16TryIntoU64 of TryInto { impl FP16x16TryIntoU32 of TryInto { fn try_into(self: FP16x16) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some(self.mag / ONE); + Option::Some(self.mag / ONE) } } } @@ -292,7 +293,7 @@ impl FP16x16TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -303,7 +304,7 @@ impl FP16x16TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -311,18 +312,18 @@ impl FP16x16TryIntoU8 of TryInto { impl FP16x16PartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP16x16, rhs: @FP16x16) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP16x16Add of Add { fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -335,7 +336,7 @@ impl FP16x16AddEq of AddEq { impl FP16x16Sub of Sub { fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -348,7 +349,7 @@ impl FP16x16SubEq of SubEq { impl FP16x16Mul of Mul { fn mul(lhs: FP16x16, rhs: FP16x16) -> 
FP16x16 { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -361,7 +362,7 @@ impl FP16x16MulEq of MulEq { impl FP16x16Div of Div { fn div(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -375,48 +376,47 @@ impl FP16x16DivEq of DivEq { impl FP16x16PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP16x16, rhs: FP16x16) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP16x16Neg of Neg { #[inline(always)] fn neg(a: FP16x16) -> FP16x16 { - return core_math::neg(a); + core_math::neg(a) } } impl FP16x16Rem of Rem { #[inline(always)] fn rem(lhs: FP16x16, rhs: FP16x16) -> FP16x16 { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } - /// INTERNAL - fn _i32_into_fp(x: FP16x16) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); if x.sign { return number_i32 * -1_i32; } + number_i32 } diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo index ddf153f18..b53adc614 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo @@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ }; fn max(a: FP16x16, b: FP16x16) -> FP16x16 { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP16x16, b: FP16x16) -> FP16x16 { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP16x16, b: FP16x16) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP16x16, b: FP16x16) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP16x16, b: FP16x16) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP16x16, b: FP16x16, c: FP16x16) -> FP16x16 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16, b: FP16x16) -> FP16x16 { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -127,6 +126,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 
'bitwise_xor(a,b)')
     }
 
+    #[test]
     fn test_bitwise_or() {
         let a = FixedTrait::new(225280, false); // 3.4375
         let b = FixedTrait::new(4160843776, true); // -2046.5625
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
index d477f051d..0085d2639 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
@@ -1,9 +1,4 @@
-use core::debug::PrintTrait;
-use core::option::OptionTrait;
-use core::result::{ResultTrait, ResultTraitImpl};
-use core::traits::{Into, TryInto};
 use core::integer;
-use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
 
 use orion::numbers::fixed_point::implementations::fp16x16::core::{
     HALF, ONE, MAX, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
@@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
 use orion::numbers::fixed_point::implementations::fp16x16::math::lut;
 
 // PUBLIC
-
 fn abs(a: FP16x16) -> FP16x16 {
-    return FixedTrait::new(a.mag, false);
+    FixedTrait::new(a.mag, false)
 }
 
 fn add(a: FP16x16, b: FP16x16) -> FP16x16 {
@@ -28,23 +22,23 @@ fn add(a: FP16x16, b: FP16x16) -> FP16x16 {
     }
 
     if (a.mag > b.mag) {
-        return FixedTrait::new(a.mag - b.mag, a.sign);
+        FixedTrait::new(a.mag - b.mag, a.sign)
     } else {
-        return FixedTrait::new(b.mag - a.mag, b.sign);
+        FixedTrait::new(b.mag - a.mag, b.sign)
     }
 }
 
 fn ceil(a: FP16x16) -> FP16x16 {
-    let (div, rem) = u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
 
     if rem == 0 {
-        return a;
+        a
     } else if !a.sign {
-        return FixedTrait::new_unscaled(div + 1, false);
+        FixedTrait::new_unscaled(div + 1, false)
     } else if div == 0 {
-        return FixedTrait::new_unscaled(0, false);
+        FixedTrait::new_unscaled(0, false)
     } else {
-        return FixedTrait::new_unscaled(div, true);
+        FixedTrait::new_unscaled(div, true)
     }
 }
 
@@ -53,16 +47,16 @@ fn div(a: FP16x16, b: FP16x16) -> FP16x16 {
     let res_u64 = a_u64 / b.mag.into();
 
     // Re-apply sign
-    return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign);
+    FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign)
 }
 
 fn eq(a: @FP16x16, b: @FP16x16) -> bool {
-    return (*a.mag == *b.mag) && (*a.sign == *b.sign);
+    (*a.mag == *b.mag) && (*a.sign == *b.sign)
 }
 
 // Calculates the natural exponent of x: e^x
 fn exp(a: FP16x16) -> FP16x16 {
-    return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^23 ≈ 12102203
+    exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^16 ≈ 94548
 }
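The `exp` hunk above leans on the identity e^x = 2^(x · log2(e)). With 16 fractional bits the constant is round(log2(e) · 2^16) = round(1.4426950 · 65536) = 94548; the `2^23 ≈ 12102203` figure seen in the removed line belongs to the FP8x23 implementation, where ONE is 2^23. A quick sanity check, written as a hypothetical test rather than anything in this patch:

```
#[test]
fn check_exp_constant() {
    // log2(e) ≈ 1.4426950, and 1.4426950 * 65536 ≈ 94548
    let log2_e = FixedTrait::<FP16x16>::new(94548, false);

    // e^1 ≈ 2.718281, and 2.718281 * 65536 ≈ 178145
    let e = exp2(log2_e * FixedTrait::<FP16x16>::ONE());
    assert(178000_u32 <= e.mag && e.mag <= 178300_u32, 'e should be near 2.71828');
}
```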
 
 // Calculates the binary exponent of x: 2^x
@@ -71,7 +65,7 @@ fn exp2(a: FP16x16) -> FP16x16 {
         return FixedTrait::ONE();
     }
 
-    let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+    let (int_part, frac_part) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
     let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false);
     let mut res_u = int_res;
 
@@ -87,57 +81,57 @@ fn exp2(a: FP16x16) -> FP16x16 {
         res_u = res_u * (r1 + FixedTrait::ONE());
     }
 
-    if (a.sign == true) {
-        return FixedTrait::ONE() / res_u;
+    if a.sign {
+        FixedTrait::ONE() / res_u
     } else {
-        return res_u;
+        res_u
     }
 }
 
 fn exp2_int(exp: u32) -> FP16x16 {
-    return FixedTrait::new_unscaled(lut::exp2(exp), false);
+    FixedTrait::new_unscaled(lut::exp2(exp), false)
 }
 
 fn floor(a: FP16x16) -> FP16x16 {
-    let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE));
+    let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE));
 
     if rem == 0 {
-        return a;
+        a
     } else if !a.sign {
-        return FixedTrait::new_unscaled(div, false);
+        FixedTrait::new_unscaled(div, false)
     } else {
-        return FixedTrait::new_unscaled(div + 1, true);
+        FixedTrait::new_unscaled(div + 1, true)
     }
 }
 
 fn ge(a: FP16x16, b: FP16x16) -> bool {
     if a.sign != b.sign {
-        return !a.sign;
+        !a.sign
     } else {
-        return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign);
+        (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign)
     }
 }
 
 fn gt(a: FP16x16, b: FP16x16) -> bool {
     if a.sign != b.sign {
-        return !a.sign;
+        !a.sign
     } else {
-        return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign);
+        (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign)
     }
 }
 
 fn le(a: FP16x16, b: FP16x16) -> bool {
     if a.sign != b.sign {
-        return a.sign;
+        a.sign
     } else {
-        return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign);
+        (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign)
     }
 }
 
 // Calculates the natural logarithm of x: ln(x)
 // self must be greater than zero
 fn ln(a: FP16x16) -> FP16x16 {
-    return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693...
+    FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693...
 }
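The remaining magic numbers in this file follow the same recipe as the `exp` constant: a change-of-base factor scaled by 2^16. For `ln` above, ln(2) = 0.6931472 and 0.6931472 · 65536 ≈ 45426, matching `FixedTrait::new(45426, false)`; for `log10` in the next hunk, log10(2) = 0.3010300 and 0.3010300 · 65536 ≈ 19728.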
} fn lt(a: FP16x16, b: FP16x16) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -190,20 +185,20 @@ fn mul(a: FP16x16, b: FP16x16) -> FP16x16 { let prod_u128 = integer::u32_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP16x16, b: @FP16x16) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP16x16) -> FP16x16 { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -211,7 +206,7 @@ fn neg(a: FP16x16) -> FP16x16 { // self is a FP16x16 point value // b is a FP16x16 point value fn pow(a: FP16x16, b: FP16x16) -> FP16x16 { - let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, integer::u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -219,7 +214,7 @@ fn pow(a: FP16x16, b: FP16x16) -> FP16x16 { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calclates the value of a^b and checks for overflow before returning @@ -227,7 +222,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -238,11 +233,7 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { let mut y = FixedTrait::ONE(); let two = integer::u32_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u32_safe_divmod(n, two); if rem == 1 { @@ -253,20 +244,20 @@ fn pow_int(a: FP16x16, b: u32, sign: bool) -> FP16x16 { n = div; }; - return x * y; + x * y } fn rem(a: FP16x16, b: FP16x16) -> FP16x16 { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP16x16) -> FP16x16 { - let (div, rem) = integer::u32_safe_divmod(a.mag, u32_as_non_zero(ONE)); + let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } @@ -276,11 +267,12 @@ fn sqrt(a: FP16x16) -> FP16x16 { assert(a.sign == false, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP16x16, b: FP16x16) -> FP16x16 { - return add(a, -b); + add(a, -b) } fn sign(a: FP16x16) -> FP16x16 { @@ -467,7 +459,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -475,7 +467,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -549,12 +541,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a 
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
index 86f87f5ca..4561e5b78 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/erf.cairo
@@ -1,4 +1,3 @@
-use core::traits::Into;
 use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE, FP16x16, FixedTrait};
 use orion::numbers::fixed_point::implementations::fp16x16::math::lut::erf_lut;
 
@@ -20,5 +19,6 @@ fn erf(x: FP16x16) -> FP16x16 {
     } else {
         erf_value = ONE;
     }
+
     FP16x16 { mag: erf_value, sign: x.sign }
 }
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
index 78d0cdac2..b77271087 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
@@ -1,4 +1,3 @@
-use core::debug::PrintTrait;
 use orion::numbers::fixed_point::implementations::fp16x16::core::{
     HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
     FP16x16MulEq, FP16x16TryIntoU128, FP16x16PartialEq, FP16x16PartialOrd, FP16x16SubEq, FP16x16Neg,
@@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{
 // Calculates hyperbolic cosine of a (fixed point)
 fn cosh(a: FP16x16) -> FP16x16 {
     let ea = a.exp();
-    return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
+    (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false)
 }
 
 // Calculates hyperbolic sine of a (fixed point)
 fn sinh(a: FP16x16) -> FP16x16 {
     let ea = a.exp();
-    return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false);
+
(ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP16x16) -> FP16x16 { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP16x16) -> FP16x16 { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP16x16) -> FP16x16 { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP16x16) -> FP16x16 { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo index 65c9746c1..723ac975f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/lut.cairo @@ -54,7 +54,7 @@ fn msb(whole: u32) -> (u32, u32) { } } - return (16, 65536); + (16, 65536) } fn exp2(exp: u32) -> u32 { @@ -112,7 +112,7 @@ fn exp2(exp: u32) -> u32 { } } - return 65536; + 65536 } fn sin(a: u32) -> (u32, u32, u32) { @@ -929,7 +929,7 @@ fn sin(a: u32) -> (u32, u32, u32) { } } - return (102542, 65535, 65536); + (102542, 65535, 65536) } fn atan(a: u32) -> (u32, u32, u32) { @@ -1233,7 +1233,7 @@ fn atan(a: u32) -> (u32, u32, u32) { return (44958, 39405, 39716); } - return (45416, 39716, 40025); + (45416, 39716, 40025) } fn erf_lut(x: u32) -> u32 { @@ -1925,5 +1925,6 @@ fn erf_lut(x: u32) -> u32 { return 65535; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo index 8b0d9b47f..7c4ad199c 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u32_safe_divmod, u32_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp16x16::math::lut; use orion::numbers::fixed_point::implementations::fp16x16::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::{ }; // CONSTANTS - const TWO_PI: u32 = 411775; const PI: u32 = 205887; const HALF_PI: u32 = 102944; @@ -22,10 +19,10 @@ fn acos(a: FP16x16) -> FP16x16 { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16) -> FP16x16 { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = 
asin_fast(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP16x16) -> FP16x16 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP16x16) -> FP16x16 { @@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16) -> FP16x16 { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,10 +99,9 @@ fn atan(a: FP16x16) -> FP16x16 { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } - fn atan_fast(a: FP16x16) -> FP16x16 { let mut at = a.abs(); let mut shift = false; @@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16) -> FP16x16 { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP16x16) -> FP16x16 { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP16x16) -> FP16x16 { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP16x16) -> FP16x16 { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP16x16) -> FP16x16 { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u32_safe_divmod(a1, u32_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u32_safe_divmod(a1, integer::u32_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16) -> FP16x16 { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -179,14 +178,16 @@ fn tan(a: FP16x16) -> FP16x16 { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP16x16) -> FP16x16 { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16, i: u32, acc: FP16x16) -> FP16x16 { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo 
b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index 9c97cce46..0a6c4795e 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use orion::numbers::{fixed_point::core::FixedTrait, FP16x16}; use orion::numbers::fixed_point::implementations::fp16x16wide::math::{ core as core_math, trig, hyp, erf @@ -18,178 +14,177 @@ struct FP16x16W { } // CONSTANTS - const TWO: u64 = 131072; // 2 ** 17 const ONE: u64 = 65536; // 2 ** 16 const HALF: u64 = 32768; // 2 ** 15 const MAX: u64 = 2147483648; // 2 ** 31 - impl FP16x16WImpl of FixedTrait { fn ZERO() -> FP16x16W { - return FP16x16W { mag: 0, sign: false }; + FP16x16W { mag: 0, sign: false } } fn HALF() -> FP16x16W { - return FP16x16W { mag: HALF, sign: false }; + FP16x16W { mag: HALF, sign: false } } fn ONE() -> FP16x16W { - return FP16x16W { mag: ONE, sign: false }; + FP16x16W { mag: ONE, sign: false } } fn MAX() -> FP16x16W { - return FP16x16W { mag: MAX, sign: false }; + FP16x16W { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP16x16W { - return FP16x16W { mag: mag, sign: sign }; + FP16x16W { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP16x16W { - return FP16x16W { mag: mag * ONE, sign: sign }; + FP16x16W { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP16x16W { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP16x16W) -> FP16x16W { - return core_math::abs(self); + core_math::abs(self) } fn acos(self: FP16x16W) -> FP16x16W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acos_fast(self: FP16x16W) -> FP16x16W { - return trig::acos_fast(self); + trig::acos_fast(self) } fn acosh(self: FP16x16W) -> FP16x16W { - return hyp::acosh(self); + hyp::acosh(self) } fn asin(self: FP16x16W) -> FP16x16W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asin_fast(self: FP16x16W) -> FP16x16W { - return trig::asin_fast(self); + trig::asin_fast(self) } fn asinh(self: FP16x16W) -> FP16x16W { - return hyp::asinh(self); + hyp::asinh(self) } fn atan(self: FP16x16W) -> FP16x16W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atan_fast(self: FP16x16W) -> FP16x16W { - return trig::atan_fast(self); + trig::atan_fast(self) } fn atanh(self: FP16x16W) -> FP16x16W { - return hyp::atanh(self); + hyp::atanh(self) } fn ceil(self: FP16x16W) -> FP16x16W { - return core_math::ceil(self); + core_math::ceil(self) } fn cos(self: FP16x16W) -> FP16x16W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cos_fast(self: FP16x16W) -> FP16x16W { - return trig::cos_fast(self); + trig::cos_fast(self) } fn cosh(self: FP16x16W) -> FP16x16W { - return hyp::cosh(self); + hyp::cosh(self) } fn floor(self: FP16x16W) -> FP16x16W { - return core_math::floor(self); + core_math::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP16x16W) -> FP16x16W { - return core_math::exp(self); + core_math::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP16x16W) -> FP16x16W { - return core_math::exp2(self); + core_math::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP16x16W) -> FP16x16W { - return 
core_math::ln(self); + core_math::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP16x16W) -> FP16x16W { - return core_math::log2(self); + core_math::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP16x16W) -> FP16x16W { - return core_math::log10(self); + core_math::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W { - return core_math::pow(self, b); + core_math::pow(self, b) } fn round(self: FP16x16W) -> FP16x16W { - return core_math::round(self); + core_math::round(self) } fn sin(self: FP16x16W) -> FP16x16W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sin_fast(self: FP16x16W) -> FP16x16W { - return trig::sin_fast(self); + trig::sin_fast(self) } fn sinh(self: FP16x16W) -> FP16x16W { - return hyp::sinh(self); + hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP16x16W) -> FP16x16W { - return core_math::sqrt(self); + core_math::sqrt(self) } fn tan(self: FP16x16W) -> FP16x16W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tan_fast(self: FP16x16W) -> FP16x16W { - return trig::tan_fast(self); + trig::tan_fast(self) } fn tanh(self: FP16x16W) -> FP16x16W { - return hyp::tanh(self); + hyp::tanh(self) } fn sign(self: FP16x16W) -> FP16x16W { - return core_math::sign(self); + core_math::sign(self) } fn NaN() -> FP16x16W { - return FP16x16W { mag: 0, sign: true }; + FP16x16W { mag: 0, sign: true } } fn is_nan(self: FP16x16W) -> bool { @@ -197,15 +192,15 @@ impl FP16x16WImpl of FixedTrait { } fn INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: false }; + FP16x16W { mag: 4294967295, sign: false } } fn POS_INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: false }; + FP16x16W { mag: 4294967295, sign: false } } fn NEG_INF() -> FP16x16W { - return FP16x16W { mag: 4294967295, sign: true }; + FP16x16W { mag: 4294967295, sign: true } } fn is_inf(self: FP16x16W) -> bool { @@ -221,7 +216,7 @@ impl FP16x16WImpl of FixedTrait { } fn erf(self: FP16x16W) -> FP16x16W { - return erf::erf(self); + erf::erf(self) } } @@ -239,9 +234,9 @@ impl FP16x16WIntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -277,10 +272,10 @@ impl FP16x16WTryIntoI8 of TryInto { impl FP16x16WTryIntoU128 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -288,10 +283,10 @@ impl FP16x16WTryIntoU128 of TryInto { impl FP16x16WTryIntoU64 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -299,10 +294,10 @@ impl FP16x16WTryIntoU64 of TryInto { impl FP16x16WTryIntoU32 of TryInto { fn try_into(self: FP16x16W) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -313,7 +308,7 @@ impl FP16x16WTryIntoU16 of TryInto { 
Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -324,7 +319,7 @@ impl FP16x16WTryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -332,18 +327,18 @@ impl FP16x16WTryIntoU8 of TryInto { impl FP16x16WPartialEq of PartialEq { #[inline(always)] fn eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { - return core_math::eq(lhs, rhs); + core_math::eq(lhs, rhs) } #[inline(always)] fn ne(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { - return core_math::ne(lhs, rhs); + core_math::ne(lhs, rhs) } } impl FP16x16WAdd of Add { fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::add(lhs, rhs); + core_math::add(lhs, rhs) } } @@ -356,7 +351,7 @@ impl FP16x16WAddEq of AddEq { impl FP16x16WSub of Sub { fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::sub(lhs, rhs); + core_math::sub(lhs, rhs) } } @@ -369,7 +364,7 @@ impl FP16x16WSubEq of SubEq { impl FP16x16WMul of Mul { fn mul(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::mul(lhs, rhs); + core_math::mul(lhs, rhs) } } @@ -382,7 +377,7 @@ impl FP16x16WMulEq of MulEq { impl FP16x16WDiv of Div { fn div(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::div(lhs, rhs); + core_math::div(lhs, rhs) } } @@ -396,48 +391,47 @@ impl FP16x16WDivEq of DivEq { impl FP16x16WPartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::ge(lhs, rhs); + core_math::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::gt(lhs, rhs); + core_math::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::le(lhs, rhs); + core_math::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP16x16W, rhs: FP16x16W) -> bool { - return core_math::lt(lhs, rhs); + core_math::lt(lhs, rhs) } } impl FP16x16WNeg of Neg { #[inline(always)] fn neg(a: FP16x16W) -> FP16x16W { - return core_math::neg(a); + core_math::neg(a) } } impl FP16x16WRem of Rem { #[inline(always)] fn rem(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W { - return core_math::rem(lhs, rhs); + core_math::rem(lhs, rhs) } } - /// INTERNAL - fn _i32_into_fp(x: FP16x16W) -> i32 { let number_felt: felt252 = (x.mag / ONE).into(); let number_i32: i32 = number_felt.try_into().unwrap(); if x.sign { return number_i32 * -1_i32; } + number_i32 } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo index c9627852a..ea5f7cf65 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo @@ -1,5 +1,4 @@ use core::debug::PrintTrait; -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WSub, FP16x16WDiv, FixedTrait, FP16x16WPrint diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo index 50f93edea..5573f7650 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo @@ -3,65 +3,65 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ }; fn max(a: FP16x16W, b: FP16x16W) -> FP16x16W { - if (a 
>= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP16x16W, b: FP16x16W) -> FP16x16W { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP16x16W, b: FP16x16W) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP16x16W, b: FP16x16W) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP16x16W, b: FP16x16W) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP16x16W, b: FP16x16W, c: FP16x16W) -> FP16x16W { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } // Tests -------------------------------------------------------------------------------------------------------------- @@ -70,7 +70,6 @@ fn bitwise_or(a: FP16x16W, b: FP16x16W) -> FP16x16W { mod tests { use super::{FixedTrait, max, min, bitwise_and, bitwise_xor, bitwise_or}; - #[test] fn test_max() { let a = FixedTrait::new_unscaled(1, false); @@ -127,6 +126,7 @@ mod tests { assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)') } + #[test] fn test_bitwise_or() { let a = FixedTrait::new(225280, false); // 3.4375 let b = FixedTrait::new(4160843776, true); // -2046.5625 diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo index 902a54b48..cafc20e4d 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo @@ -1,9 +1,4 @@ -use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{Into, TryInto}; use core::integer; -use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul}; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, MAX, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul, @@ -13,9 +8,8 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut; // PUBLIC - fn abs(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W { @@ -28,23 +22,23 @@ fn add(a: FP16x16W, b: FP16x16W) -> FP16x16W { } if (a.mag > b.mag) { - return FixedTrait::new(a.mag - b.mag, a.sign); + FixedTrait::new(a.mag - b.mag, a.sign) } else { - return FixedTrait::new(b.mag - a.mag, b.sign); + FixedTrait::new(b.mag - a.mag, b.sign) } } fn ceil(a: FP16x16W) -> FP16x16W { - let (div, rem) = u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div 
+ 1, false); + FixedTrait::new_unscaled(div + 1, false) } else if div == 0 { - return FixedTrait::new_unscaled(0, false); + FixedTrait::new_unscaled(0, false) } else { - return FixedTrait::new_unscaled(div, true); + FixedTrait::new_unscaled(div, true) } } @@ -53,16 +47,16 @@ fn div(a: FP16x16W, b: FP16x16W) -> FP16x16W { let res_u64 = a_u64 / b.mag.into(); // Re-apply sign - return FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new(res_u64.try_into().unwrap(), a.sign ^ b.sign) } fn eq(a: @FP16x16W, b: @FP16x16W) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } // Calculates the natural exponent of x: e^x fn exp(a: FP16x16W) -> FP16x16W { - return exp2(FixedTrait::new(94548, false) * a); // log2(e) * 2^23 ≈ 12102203 + exp2(FixedTrait::new(94548, false) * a) // log2(e) * 2^23 ≈ 12102203 } // Calculates the binary exponent of x: 2^x @@ -71,7 +65,7 @@ fn exp2(a: FP16x16W) -> FP16x16W { return FixedTrait::ONE(); } - let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (int_part, frac_part) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); let int_res = FixedTrait::new_unscaled(lut::exp2(int_part), false); let mut res_u = int_res; @@ -87,57 +81,57 @@ fn exp2(a: FP16x16W) -> FP16x16W { res_u = res_u * (r1 + FixedTrait::ONE()); } - if (a.sign == true) { - return FixedTrait::ONE() / res_u; + if a.sign { + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } fn exp2_int(exp: u64) -> FP16x16W { - return FixedTrait::new_unscaled(lut::exp2(exp), false); + FixedTrait::new_unscaled(lut::exp2(exp), false) } fn floor(a: FP16x16W) -> FP16x16W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div, false); + FixedTrait::new_unscaled(div, false) } else { - return FixedTrait::new_unscaled(div + 1, true); + FixedTrait::new_unscaled(div + 1, true) } } fn ge(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(45426, false) * log2(a); // ln(2) = 0.693... + FixedTrait::new(45426, false) * log2(a) // ln(2) = 0.693... 
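// n.b. in this Q16.16 format ln(2) scales to round(0.693147 * 65536) = 45426,
// just as log2(e) in exp above scales to round(1.442695 * 65536) = 94548;
// quick check: ln(FixedTrait::new_unscaled(2, false)) ≈ FixedTrait::new(45426, false)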
} // Calculates the binary logarithm of x: log2(x) @@ -157,7 +151,7 @@ fn log2(a: FP16x16W) -> FP16x16W { let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(596, true) * norm; @@ -168,21 +162,22 @@ fn log2(a: FP16x16W) -> FP16x16W { let r3 = (r4 + FixedTrait::new(608566, false)) * norm; let r2 = (r3 + FixedTrait::new(655828, true)) * norm; let r1 = (r2 + FixedTrait::new(534433, false)) * norm; - return r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(224487, true) + FixedTrait::new_unscaled(msb, false) } } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(a: FP16x16W) -> FP16x16W { - return FixedTrait::new(19728, false) * log2(a); // log10(2) = 0.301... + FixedTrait::new(19728, false) * log2(a) // log10(2) = 0.301... } fn lt(a: FP16x16W, b: FP16x16W) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -190,20 +185,20 @@ fn mul(a: FP16x16W, b: FP16x16W) -> FP16x16W { let prod_u128 = integer::u64_wide_mul(a.mag, b.mag); // Re-apply sign - return FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign); + FixedTrait::new((prod_u128 / ONE.into()).try_into().unwrap(), a.sign ^ b.sign) } fn ne(a: @FP16x16W, b: @FP16x16W) -> bool { - return (*a.mag != *b.mag) || (*a.sign != *b.sign); + (*a.mag != *b.mag) || (*a.sign != *b.sign) } fn neg(a: FP16x16W) -> FP16x16W { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -211,7 +206,7 @@ fn neg(a: FP16x16W) -> FP16x16W { // self is a FP16x16W point value // b is a FP16x16W point value fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W { - let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, integer::u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -219,7 +214,7 @@ fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W { } // x^y = exp(y*ln(x)) for x > 0 will error for x < 0 - return exp(b * ln(a)); + exp(b * ln(a)) } // Calclates the value of a^b and checks for overflow before returning @@ -227,7 +222,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { let mut x = a; let mut n = b; - if sign == true { + if sign { x = FixedTrait::ONE() / x; } @@ -238,11 +233,7 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { let mut y = FixedTrait::ONE(); let two = integer::u64_as_non_zero(2); - loop { - if n <= 1 { - break; - } - + while n > 1 { let (div, rem) = integer::u64_safe_divmod(n, two); if rem == 1 { @@ -253,20 +244,20 @@ fn pow_int(a: FP16x16W, b: u64, sign: bool) -> FP16x16W { n = div; }; - return x * y; + x * y } fn rem(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return a - floor(a / b) * b; + a - floor(a / b) * b } fn round(a: FP16x16W) -> FP16x16W { - let (div, rem) = integer::u64_safe_divmod(a.mag, u64_as_non_zero(ONE)); + let (div, rem) = integer::u64_safe_divmod(a.mag, integer::u64_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, 
a.sign); + FixedTrait::new_unscaled(div, a.sign) } } @@ -276,11 +267,12 @@ fn sqrt(a: FP16x16W) -> FP16x16W { assert(a.sign == false, 'must be positive'); let root = integer::u64_sqrt(a.mag.into() * ONE.into()); - return FixedTrait::new(root.into(), false); + + FixedTrait::new(root.into(), false) } fn sub(a: FP16x16W, b: FP16x16W) -> FP16x16W { - return add(a, -b); + add(a, -b) } fn sign(a: FP16x16W) -> FP16x16W { @@ -467,7 +459,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = eq(@a, @b); - assert(c == true, 'invalid result'); + assert(c, 'invalid result'); } #[test] @@ -475,7 +467,7 @@ mod tests { let a = FixedTrait::new_unscaled(42, false); let b = FixedTrait::new_unscaled(42, false); let c = ne(@a, @b); - assert(c == false, 'invalid result'); + assert(!c, 'invalid result'); } #[test] @@ -549,12 +541,12 @@ mod tests { let c = FixedTrait::::new_unscaled(1, true); assert(a <= a, 'a <= a'); - assert(a <= b == false, 'a <= b'); - assert(a <= c == false, 'a <= c'); + assert(!(a <= b), 'a <= b'); + assert(!(a <= c), 'a <= c'); assert(b <= a, 'b <= a'); assert(b <= b, 'b <= b'); - assert(b <= c == false, 'b <= c'); + assert(!(b <= c), 'b <= c'); assert(c <= a, 'c <= a'); assert(c <= b, 'c <= b'); @@ -567,17 +559,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a < a == false, 'a < a'); - assert(a < b == false, 'a < b'); - assert(a < c == false, 'a < c'); + assert(!(a < a), 'a < a'); + assert(!(a < b), 'a < b'); + assert(!(a < c), 'a < c'); assert(b < a, 'b < a'); - assert(b < b == false, 'b < b'); - assert(b < c == false, 'b < c'); + assert(!(b < b), 'b < b'); + assert(!(b < c), 'b < c'); assert(c < a, 'c < a'); assert(c < b, 'c < b'); - assert(c < c == false, 'c < c'); + assert(!(c < c), 'c < c'); } #[test] @@ -590,12 +582,12 @@ mod tests { assert(a >= b, 'a >= b'); assert(a >= c, 'a >= c'); - assert(b >= a == false, 'b >= a'); + assert(!(b >= a), 'b >= a'); assert(b >= b, 'b >= b'); assert(b >= c, 'b >= c'); - assert(c >= a == false, 'c >= a'); - assert(c >= b == false, 'c >= b'); + assert(!(c >= a), 'c >= a'); + assert(!(c >= b), 'c >= b'); assert(c >= c, 'c >= c'); } @@ -605,17 +597,17 @@ mod tests { let b = FixedTrait::new_unscaled(0, false); let c = FixedTrait::::new_unscaled(1, true); - assert(a > a == false, 'a > a'); + assert(!(a > a), 'a > a'); assert(a > b, 'a > b'); assert(a > c, 'a > c'); - assert(b > a == false, 'b > a'); - assert(b > b == false, 'b > b'); + assert(!(b > a), 'b > a'); + assert(!(b > b), 'b > b'); assert(b > c, 'b > c'); - assert(c > a == false, 'c > a'); - assert(c > b == false, 'c > b'); - assert(c > c == false, 'c > c'); + assert(!(c > a), 'c > a'); + assert(!(c > b), 'c > b'); + assert(!(c > c), 'c > c'); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo index 49d19bf20..143b7dfe6 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/erf.cairo @@ -1,8 +1,6 @@ -use core::traits::Into; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ONE, FP16x16W, FixedTrait}; use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut::erf_lut; - const ERF_COMPUTATIONAL_ACCURACY: u64 = 100; const ROUND_CHECK_NUMBER: u64 = 10; // Values > MAX_ERF_NUMBER return 1 @@ -21,5 +19,6 @@ fn erf(x: FP16x16W) -> FP16x16W { } else 
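// past MAX_ERF_NUMBER the function has saturated (erf(x) -> ±1),
// so the magnitude is simply ONE and x's sign is reapplied below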
{ erf_value = ONE; } + FP16x16W { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo index 527b6046d..e2ab580fb 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo @@ -1,4 +1,3 @@ -use core::debug::PrintTrait; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul, FP16x16WMulEq, FP16x16WTryIntoU128, FP16x16WPartialEq, FP16x16WPartialOrd, FP16x16WSubEq, @@ -8,53 +7,55 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ // Calculates hyperbolic cosine of a (fixed point) fn cosh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); - return (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea + (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic sine of a (fixed point) fn sinh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); - return (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false); + + (ea - (FixedTrait::ONE() / ea)) / FixedTrait::new(TWO, false) } // Calculates hyperbolic tangent of a (fixed point) fn tanh(a: FP16x16W) -> FP16x16W { let ea = a.exp(); let ea_i = FixedTrait::ONE() / ea; - return (ea - ea_i) / (ea + ea_i); + + (ea - ea_i) / (ea + ea_i) } // Calculates inverse hyperbolic cosine of a (fixed point) fn acosh(a: FP16x16W) -> FP16x16W { let root = (a * a - FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic sine of a (fixed point) fn asinh(a: FP16x16W) -> FP16x16W { let root = (a * a + FixedTrait::ONE()).sqrt(); - return (a + root).ln(); + + (a + root).ln() } // Calculates inverse hyperbolic tangent of a (fixed point) fn atanh(a: FP16x16W) -> FP16x16W { let one = FixedTrait::ONE(); let ln_arg = (one + a) / (one - a); - return ln_arg.ln() / FixedTrait::new(TWO, false); + + ln_arg.ln() / FixedTrait::new(TWO, false) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::option::OptionTrait; - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::assert_precise; use super::{FixedTrait, TWO, cosh, ONE, sinh, tanh, acosh, asinh, atanh, HALF}; - #[test] #[available_gas(10000000)] fn test_cosh() { diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo index 62c58537e..f40f4d15a 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/lut.cairo @@ -54,7 +54,7 @@ fn msb(whole: u64) -> (u64, u64) { } } - return (16, 65536); + (16, 65536) } fn exp2(exp: u64) -> u64 { @@ -112,7 +112,7 @@ fn exp2(exp: u64) -> u64 { } } - return 65536; + 65536 } fn sin(a: u64) -> (u64, u64, u64) { @@ -929,7 +929,7 @@ fn sin(a: u64) -> (u64, u64, u64) { } } - return (102542, 65535, 65536); + (102542, 65535, 65536) } fn atan(a: u64) -> (u64, u64, u64) { @@ -1233,7 +1233,7 @@ fn atan(a: u64) -> (u64, u64, u64) { return (44958, 39405, 39716); } - return (45416, 39716, 40025); + (45416, 39716, 40025) } fn erf_lut(x: u64) -> u64 { @@ -1925,5 +1925,6 @@ fn erf_lut(x: u64) -> u64 { return 65535; } } - return ONE; + + ONE } diff --git 
a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo index 3c22fd97f..441248cf8 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo @@ -1,6 +1,4 @@ -use core::debug::PrintTrait; -use core::integer::{u64_safe_divmod, u64_as_non_zero}; -use core::option::OptionTrait; +use core::integer; use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ @@ -9,7 +7,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ }; // CONSTANTS - const TWO_PI: u64 = 411775; const PI: u64 = 205887; const HALF_PI: u64 = 102944; @@ -22,10 +19,10 @@ fn acos(a: FP16x16W) -> FP16x16W { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -33,10 +30,10 @@ fn acos_fast(a: FP16x16W) -> FP16x16W { let asin_arg = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 let asin_res = asin_fast(asin_arg); - if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + if a.sign { + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -48,7 +45,8 @@ fn asin(a: FP16x16W) -> FP16x16W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan(a / div); + + atan(a / div) } fn asin_fast(a: FP16x16W) -> FP16x16W { @@ -57,7 +55,8 @@ fn asin_fast(a: FP16x16W) -> FP16x16W { } let div = (FixedTrait::ONE() - a * a).sqrt(); // will fail if a > 1 - return atan_fast(a / div); + + atan_fast(a / div) } // Calculates arctan(a) (fixed point) @@ -100,10 +99,9 @@ fn atan(a: FP16x16W) -> FP16x16W { res = res - FixedTrait::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } - fn atan_fast(a: FP16x16W) -> FP16x16W { let mut at = a.abs(); let mut shift = false; @@ -135,31 +133,32 @@ fn atan_fast(a: FP16x16W) -> FP16x16W { res = res - FixedTrait::::new(HALF_PI, false); } - return FixedTrait::new(res.mag, a.sign); + FixedTrait::new(res.mag, a.sign) } // Calculates cos(a) with a in radians (fixed point) fn cos(a: FP16x16W) -> FP16x16W { - return sin(FixedTrait::new(HALF_PI, false) - a); + sin(FixedTrait::new(HALF_PI, false) - a) } fn cos_fast(a: FP16x16W) -> FP16x16W { - return sin_fast(FixedTrait::new(HALF_PI, false) - a); + sin_fast(FixedTrait::new(HALF_PI, false) - a) } fn sin(a: FP16x16W) -> FP16x16W { let a1 = a.mag % TWO_PI; - let (whole_rem, partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let a2 = FixedTrait::new(partial_rem, false); let partial_sign = whole_rem == 1; let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE()); - return FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0); + + FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0) } fn sin_fast(a: FP16x16W) -> FP16x16W { let a1 = a.mag % TWO_PI; - let (whole_rem, mut partial_rem) = u64_safe_divmod(a1, u64_as_non_zero(PI)); + let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI)); let partial_sign = whole_rem == 1; if partial_rem >= HALF_PI { @@ -171,7 +170,7 @@ fn sin_fast(a: FP16x16W) -> 
FP16x16W { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) @@ -179,14 +178,16 @@ fn tan(a: FP16x16W) -> FP16x16W { let sinx = sin(a); let cosx = cos(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } fn tan_fast(a: FP16x16W) -> FP16x16W { let sinx = sin_fast(a); let cosx = cos_fast(a); assert(cosx.mag != 0, 'tan undefined'); - return sinx / cosx; + + sinx / cosx } // Helper function to calculate Taylor series for sin @@ -199,15 +200,13 @@ fn _sin_loop(a: FP16x16W, i: u64, acc: FP16x16W) -> FP16x16W { return new_acc; } - return _sin_loop(a, i - 1, new_acc); + _sin_loop(a, i - 1, new_acc) } // Tests -------------------------------------------------------------------------------------------------------------- #[cfg(test)] mod tests { - use core::traits::Into; - use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{ assert_precise, assert_relative }; diff --git a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo index 14bcf69c8..ec1043f89 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/comp.cairo @@ -1,48 +1,47 @@ -use orion::numbers::{FP32x32, FixedTrait}; -use orion::numbers::FP32x32Impl; +use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; fn xor(a: FP32x32, b: FP32x32) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP32x32, b: FP32x32) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP32x32, b: FP32x32, c: FP32x32) -> FP32x32 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn bitwise_and(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP32x32, b: FP32x32) -> FP32x32 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index e7fd8e24d..ee38799da 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -1,177 +1,174 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use cubit::f64 as fp32x32; use cubit::f64::Fixed as FP32x32; use cubit::f64::{ONE, HALF}; use cubit::f64::types::fixed; -use orion::numbers::fixed_point::implementations::fp32x32::erf; use orion::numbers::fixed_point::core::{FixedTrait}; +use orion::numbers::fixed_point::implementations::fp32x32::erf; use 
orion::numbers::fixed_point::utils; const MAX: u64 = 9223372036854775808; impl FP32x32Impl of FixedTrait { fn ZERO() -> FP32x32 { - return FP32x32 { mag: 0, sign: false }; + FP32x32 { mag: 0, sign: false } } fn HALF() -> FP32x32 { - return FP32x32 { mag: HALF, sign: false }; + FP32x32 { mag: HALF, sign: false } } fn ONE() -> FP32x32 { - return FP32x32 { mag: ONE, sign: false }; + FP32x32 { mag: ONE, sign: false } } fn MAX() -> FP32x32 { - return FP32x32 { mag: MAX, sign: false }; + FP32x32 { mag: MAX, sign: false } } fn new(mag: u64, sign: bool) -> FP32x32 { - return FP32x32 { mag: mag, sign: sign }; + FP32x32 { mag: mag, sign: sign } } fn new_unscaled(mag: u64, sign: bool) -> FP32x32 { - return FP32x32 { mag: mag * ONE, sign: sign }; + FP32x32 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP32x32 { let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP32x32) -> FP32x32 { - return fp32x32::ops::abs(self); + fp32x32::ops::abs(self) } fn acos(self: FP32x32) -> FP32x32 { - return fp32x32::trig::acos_fast(self); + fp32x32::trig::acos_fast(self) } fn acos_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::acos_fast(self); + fp32x32::trig::acos_fast(self) } fn acosh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::acosh(self); + fp32x32::hyp::acosh(self) } fn asin(self: FP32x32) -> FP32x32 { - return fp32x32::trig::asin_fast(self); + fp32x32::trig::asin_fast(self) } fn asin_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::asin_fast(self); + fp32x32::trig::asin_fast(self) } fn asinh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::asinh(self); + fp32x32::hyp::asinh(self) } fn atan(self: FP32x32) -> FP32x32 { - return fp32x32::trig::atan_fast(self); + fp32x32::trig::atan_fast(self) } fn atan_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::atan_fast(self); + fp32x32::trig::atan_fast(self) } fn atanh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::atanh(self); + fp32x32::hyp::atanh(self) } fn ceil(self: FP32x32) -> FP32x32 { - return fp32x32::ops::ceil(self); + fp32x32::ops::ceil(self) } fn cos(self: FP32x32) -> FP32x32 { - return fp32x32::trig::cos_fast(self); + fp32x32::trig::cos_fast(self) } fn cos_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::cos_fast(self); + fp32x32::trig::cos_fast(self) } fn cosh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::cosh(self); + fp32x32::hyp::cosh(self) } fn floor(self: FP32x32) -> FP32x32 { - return fp32x32::ops::floor(self); + fp32x32::ops::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP32x32) -> FP32x32 { - return fp32x32::ops::exp(self); + fp32x32::ops::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP32x32) -> FP32x32 { - return fp32x32::ops::exp2(self); + fp32x32::ops::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP32x32) -> FP32x32 { - return fp32x32::ops::ln(self); + fp32x32::ops::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP32x32) -> FP32x32 { - return fp32x32::ops::log2(self); + fp32x32::ops::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP32x32) -> FP32x32 { - return fp32x32::ops::log10(self); + fp32x32::ops::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point 
value // b is a fixed point value fn pow(self: FP32x32, b: FP32x32) -> FP32x32 { - return fp32x32::ops::pow(self, b); + fp32x32::ops::pow(self, b) } fn round(self: FP32x32) -> FP32x32 { - return fp32x32::ops::round(self); + fp32x32::ops::round(self) } fn sin(self: FP32x32) -> FP32x32 { - return fp32x32::trig::sin_fast(self); + fp32x32::trig::sin_fast(self) } fn sin_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::sin_fast(self); + fp32x32::trig::sin_fast(self) } fn sinh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::sinh(self); + fp32x32::hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP32x32) -> FP32x32 { - return fp32x32::ops::sqrt(self); + fp32x32::ops::sqrt(self) } fn tan(self: FP32x32) -> FP32x32 { - return fp32x32::trig::tan_fast(self); + fp32x32::trig::tan_fast(self) } fn tan_fast(self: FP32x32) -> FP32x32 { - return fp32x32::trig::tan_fast(self); + fp32x32::trig::tan_fast(self) } fn tanh(self: FP32x32) -> FP32x32 { - return fp32x32::hyp::tanh(self); + fp32x32::hyp::tanh(self) } fn sign(self: FP32x32) -> FP32x32 { @@ -179,7 +176,7 @@ impl FP32x32Impl of FixedTrait { } fn NaN() -> FP32x32 { - return FP32x32 { mag: 0, sign: true }; + FP32x32 { mag: 0, sign: true } } fn is_nan(self: FP32x32) -> bool { @@ -187,15 +184,15 @@ impl FP32x32Impl of FixedTrait { } fn INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: false }; + FP32x32 { mag: 4294967295, sign: false } } fn POS_INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: false }; + FP32x32 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP32x32 { - return FP32x32 { mag: 4294967295, sign: true }; + FP32x32 { mag: 4294967295, sign: true } } fn is_inf(self: FP32x32) -> bool { @@ -211,11 +208,10 @@ impl FP32x32Impl of FixedTrait { } fn erf(self: FP32x32) -> FP32x32 { - return erf::erf(self); + erf::erf(self) } } - impl FP32x32Print of PrintTrait { fn print(self: FP32x32) { self.sign.print(); @@ -229,9 +225,9 @@ impl FP32x32IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -239,10 +235,10 @@ impl FP32x32IntoFelt252 of Into { impl FP32x32TryIntoU64 of TryInto { fn try_into(self: FP32x32) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ -253,7 +249,7 @@ impl FP32x32TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -264,7 +260,7 @@ impl FP32x32TryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -275,7 +271,7 @@ impl FP32x32TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7 +296,7 @@ impl FP32x32TryIntoI8 of TryInto { impl FP32x32Add of Add { fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::add(lhs, rhs); + fp32x32::ops::add(lhs, rhs) } } @@ -313,7 +309,7 @@ impl FP32x32AddEq of AddEq { impl FP32x32Sub of Sub { fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::sub(lhs, rhs); + fp32x32::ops::sub(lhs, rhs) } } @@ -326,7 +322,7 @@ impl FP32x32SubEq of SubEq { impl FP32x32Mul of Mul { fn 
mul(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::mul(lhs, rhs); + fp32x32::ops::mul(lhs, rhs) } } @@ -339,7 +335,7 @@ impl FP32x32MulEq of MulEq { impl FP32x32Div of Div { fn div(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::div(lhs, rhs); + fp32x32::ops::div(lhs, rhs) } } @@ -353,45 +349,44 @@ impl FP32x32DivEq of DivEq { impl FP32x32PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::ge(lhs, rhs); + fp32x32::ops::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::gt(lhs, rhs); + fp32x32::ops::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::le(lhs, rhs); + fp32x32::ops::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP32x32, rhs: FP32x32) -> bool { - return fp32x32::ops::lt(lhs, rhs); + fp32x32::ops::lt(lhs, rhs) } } impl FP32x32Neg of Neg { #[inline(always)] fn neg(a: FP32x32) -> FP32x32 { - return fp32x32::ops::neg(a); + fp32x32::ops::neg(a) } } impl FP32x32Rem of Rem { #[inline(always)] fn rem(lhs: FP32x32, rhs: FP32x32) -> FP32x32 { - return fp32x32::ops::rem(lhs, rhs); + fp32x32::ops::rem(lhs, rhs) } } fn eq(a: @FP32x32, b: @FP32x32) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } /// INTERNAL - fn _i8_try_from_fp(x: FP32x32) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); diff --git a/src/numbers/fixed_point/implementations/fp32x32/erf.cairo b/src/numbers/fixed_point/implementations/fp32x32/erf.cairo index 63ee48f85..6ebff1eec 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/erf.cairo @@ -1,7 +1,6 @@ -use core::traits::Into; -use orion::numbers::{FP32x32, FixedTrait}; use cubit::f64::ONE; +use orion::numbers::{FP32x32, FixedTrait}; use orion::numbers::fixed_point::implementations::fp32x32::lut::erf_lut; const ERF_COMPUTATIONAL_ACCURACY: u64 = 100; @@ -22,5 +21,6 @@ fn erf(x: FP32x32) -> FP32x32 { } else { erf_value = ONE; } + FP32x32 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp32x32/lut.cairo b/src/numbers/fixed_point/implementations/fp32x32/lut.cairo index 03a576452..59173612b 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/lut.cairo @@ -689,5 +689,6 @@ fn erf_lut(x: u64) -> u64 { return 4294960759; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp64x64/comp.cairo b/src/numbers/fixed_point/implementations/fp64x64/comp.cairo index 121336680..e1dc177f0 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/comp.cairo @@ -3,46 +3,46 @@ use orion::numbers::FP64x64Impl; fn xor(a: FP64x64, b: FP64x64) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } fn or(a: FP64x64, b: FP64x64) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } fn and(a: FP64x64, b: FP64x64) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP64x64, b: FP64x64, c: FP64x64) -> FP64x64 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } fn 
bitwise_and(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag & b.mag, a.sign & b.sign); + FixedTrait::new(a.mag & b.mag, a.sign & b.sign) } fn bitwise_xor(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign); + FixedTrait::new(a.mag ^ b.mag, a.sign ^ b.sign) } fn bitwise_or(a: FP64x64, b: FP64x64) -> FP64x64 { - return FixedTrait::new(a.mag | b.mag, a.sign | b.sign); + FixedTrait::new(a.mag | b.mag, a.sign | b.sign) } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index 23af67564..e11fcd9a4 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -1,9 +1,5 @@ use core::debug::PrintTrait; -use core::option::OptionTrait; -use core::result::{ResultTrait, ResultTraitImpl}; -use core::traits::{TryInto, Into}; - use cubit::f128 as fp64x64; use cubit::f128::types::Fixed as FP64x64; use cubit::f128::ONE_u128 as ONE; @@ -17,161 +13,162 @@ const HALF: u128 = 9223372036854775808_u128; // 2 ** 63 impl FP64x64Impl of FixedTrait { fn ZERO() -> FP64x64 { - return FP64x64 { mag: 0, sign: false }; + FP64x64 { mag: 0, sign: false } } fn HALF() -> FP64x64 { - return FP64x64 { mag: HALF, sign: false }; + FP64x64 { mag: HALF, sign: false } } fn ONE() -> FP64x64 { - return FP64x64 { mag: ONE, sign: false }; + FP64x64 { mag: ONE, sign: false } } fn MAX() -> FP64x64 { - return FP64x64 { mag: MAX, sign: false }; + FP64x64 { mag: MAX, sign: false } } fn new(mag: u128, sign: bool) -> FP64x64 { - return FP64x64 { mag: mag, sign: sign }; + FP64x64 { mag: mag, sign: sign } } fn new_unscaled(mag: u128, sign: bool) -> FP64x64 { - return FP64x64 { mag: mag * ONE, sign: sign }; + FP64x64 { mag: mag * ONE, sign: sign } } fn from_felt(val: felt252) -> FP64x64 { let mag = core::integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap(); - return FixedTrait::new(mag, utils::felt_sign(val)); + + FixedTrait::new(mag, utils::felt_sign(val)) } fn abs(self: FP64x64) -> FP64x64 { - return fp64x64::ops::abs(self); + fp64x64::ops::abs(self) } fn acos(self: FP64x64) -> FP64x64 { - return fp64x64::trig::acos_fast(self); + fp64x64::trig::acos_fast(self) } fn acos_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::acos_fast(self); + fp64x64::trig::acos_fast(self) } fn acosh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::acosh(self); + fp64x64::hyp::acosh(self) } fn asin(self: FP64x64) -> FP64x64 { - return fp64x64::trig::asin_fast(self); + fp64x64::trig::asin_fast(self) } fn asin_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::asin_fast(self); + fp64x64::trig::asin_fast(self) } fn asinh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::asinh(self); + fp64x64::hyp::asinh(self) } fn atan(self: FP64x64) -> FP64x64 { - return fp64x64::trig::atan_fast(self); + fp64x64::trig::atan_fast(self) } fn atan_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::atan_fast(self); + fp64x64::trig::atan_fast(self) } fn atanh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::atanh(self); + fp64x64::hyp::atanh(self) } fn ceil(self: FP64x64) -> FP64x64 { - return fp64x64::ops::ceil(self); + fp64x64::ops::ceil(self) } fn cos(self: FP64x64) -> FP64x64 { - return fp64x64::trig::cos_fast(self); + fp64x64::trig::cos_fast(self) } fn cos_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::cos_fast(self); + fp64x64::trig::cos_fast(self) } fn cosh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::cosh(self); + 
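// cosh(a) = (e^a + e^(-a)) / 2, delegated to cubit's f128 hyperbolic ops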
fp64x64::hyp::cosh(self) } fn floor(self: FP64x64) -> FP64x64 { - return fp64x64::ops::floor(self); + fp64x64::ops::floor(self) } // Calculates the natural exponent of x: e^x fn exp(self: FP64x64) -> FP64x64 { - return fp64x64::ops::exp(self); + fp64x64::ops::exp(self) } // Calculates the binary exponent of x: 2^x fn exp2(self: FP64x64) -> FP64x64 { - return fp64x64::ops::exp2(self); + fp64x64::ops::exp2(self) } // Calculates the natural logarithm of x: ln(x) // self must be greater than zero fn ln(self: FP64x64) -> FP64x64 { - return fp64x64::ops::ln(self); + fp64x64::ops::ln(self) } // Calculates the binary logarithm of x: log2(x) // self must be greather than zero fn log2(self: FP64x64) -> FP64x64 { - return fp64x64::ops::log2(self); + fp64x64::ops::log2(self) } // Calculates the base 10 log of x: log10(x) // self must be greater than zero fn log10(self: FP64x64) -> FP64x64 { - return fp64x64::ops::log10(self); + fp64x64::ops::log10(self) } // Calclates the value of x^y and checks for overflow before returning // self is a fixed point value // b is a fixed point value fn pow(self: FP64x64, b: FP64x64) -> FP64x64 { - return fp64x64::ops::pow(self, b); + fp64x64::ops::pow(self, b) } fn round(self: FP64x64) -> FP64x64 { - return fp64x64::ops::round(self); + fp64x64::ops::round(self) } fn sin(self: FP64x64) -> FP64x64 { - return fp64x64::trig::sin_fast(self); + fp64x64::trig::sin_fast(self) } fn sin_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::sin_fast(self); + fp64x64::trig::sin_fast(self) } fn sinh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::sinh(self); + fp64x64::hyp::sinh(self) } // Calculates the square root of a fixed point value // x must be positive fn sqrt(self: FP64x64) -> FP64x64 { - return fp64x64::ops::sqrt(self); + fp64x64::ops::sqrt(self) } fn tan(self: FP64x64) -> FP64x64 { - return fp64x64::trig::tan_fast(self); + fp64x64::trig::tan_fast(self) } fn tan_fast(self: FP64x64) -> FP64x64 { - return fp64x64::trig::tan_fast(self); + fp64x64::trig::tan_fast(self) } fn tanh(self: FP64x64) -> FP64x64 { - return fp64x64::hyp::tanh(self); + fp64x64::hyp::tanh(self) } fn sign(self: FP64x64) -> FP64x64 { @@ -179,7 +176,7 @@ impl FP64x64Impl of FixedTrait { } fn NaN() -> FP64x64 { - return FP64x64 { mag: 0, sign: true }; + FP64x64 { mag: 0, sign: true } } fn is_nan(self: FP64x64) -> bool { @@ -187,15 +184,15 @@ impl FP64x64Impl of FixedTrait { } fn INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: false }; + FP64x64 { mag: 4294967295, sign: false } } fn POS_INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: false }; + FP64x64 { mag: 4294967295, sign: false } } fn NEG_INF() -> FP64x64 { - return FP64x64 { mag: 4294967295, sign: true }; + FP64x64 { mag: 4294967295, sign: true } } fn is_inf(self: FP64x64) -> bool { @@ -211,11 +208,10 @@ impl FP64x64Impl of FixedTrait { } fn erf(self: FP64x64) -> FP64x64 { - return erf::erf(self); + erf::erf(self) } } - impl FP64x64Print of PrintTrait { fn print(self: FP64x64) { self.sign.print(); @@ -229,9 +225,9 @@ impl FP64x64IntoFelt252 of Into { let mag_felt = self.mag.into(); if self.sign { - return mag_felt * -1; + mag_felt * -1 } else { - return mag_felt * 1; + mag_felt * 1 } } } @@ -239,10 +235,10 @@ impl FP64x64IntoFelt252 of Into { impl FP64x64TryIntoU128 of TryInto { fn try_into(self: FP64x64) -> Option { if self.sign { - return Option::None(()); + Option::None(()) } else { // Unscale the magnitude and round down - return Option::Some((self.mag / ONE).into()); + Option::Some((self.mag / ONE).into()) } } } @@ 
-253,7 +249,7 @@ impl FP64x64TryIntoU16 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -264,7 +260,7 @@ impl FP64x64TryIntoU32 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -275,7 +271,7 @@ impl FP64x64TryIntoU8 of TryInto { Option::None(()) } else { // Unscale the magnitude and round down - return (self.mag / ONE).try_into(); + (self.mag / ONE).try_into() } } } @@ -300,7 +296,7 @@ impl FP64x64TryIntoI8 of TryInto { impl FP64x64Add of Add { fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::add(lhs, rhs); + fp64x64::ops::add(lhs, rhs) } } @@ -313,7 +309,7 @@ impl FP64x64AddEq of AddEq { impl FP64x64Sub of Sub { fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::sub(lhs, rhs); + fp64x64::ops::sub(lhs, rhs) } } @@ -326,7 +322,7 @@ impl FP64x64SubEq of SubEq { impl FP64x64Mul of Mul { fn mul(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::mul(lhs, rhs); + fp64x64::ops::mul(lhs, rhs) } } @@ -339,7 +335,7 @@ impl FP64x64MulEq of MulEq { impl FP64x64Div of Div { fn div(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::div(lhs, rhs); + fp64x64::ops::div(lhs, rhs) } } @@ -353,45 +349,44 @@ impl FP64x64DivEq of DivEq { impl FP64x64PartialOrd of PartialOrd { #[inline(always)] fn ge(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::ge(lhs, rhs); + fp64x64::ops::ge(lhs, rhs) } #[inline(always)] fn gt(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::gt(lhs, rhs); + fp64x64::ops::gt(lhs, rhs) } #[inline(always)] fn le(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::le(lhs, rhs); + fp64x64::ops::le(lhs, rhs) } #[inline(always)] fn lt(lhs: FP64x64, rhs: FP64x64) -> bool { - return fp64x64::ops::lt(lhs, rhs); + fp64x64::ops::lt(lhs, rhs) } } impl FP64x64Neg of Neg { #[inline(always)] fn neg(a: FP64x64) -> FP64x64 { - return fp64x64::ops::neg(a); + fp64x64::ops::neg(a) } } impl FP64x64Rem of Rem { #[inline(always)] fn rem(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { - return fp64x64::ops::rem(lhs, rhs); + fp64x64::ops::rem(lhs, rhs) } } fn eq(a: @FP64x64, b: @FP64x64) -> bool { - return (*a.mag == *b.mag) && (*a.sign == *b.sign); + (*a.mag == *b.mag) && (*a.sign == *b.sign) } /// INTERNAL - fn _i8_try_from_fp(x: FP64x64) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); diff --git a/src/numbers/fixed_point/implementations/fp64x64/erf.cairo b/src/numbers/fixed_point/implementations/fp64x64/erf.cairo index 3f5101b65..1558f1e24 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/erf.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/erf.cairo @@ -1,7 +1,6 @@ -use core::traits::Into; -use orion::numbers::{FP64x64, FixedTrait}; use cubit::f128::ONE_u128 as ONE; +use orion::numbers::{FP64x64, FixedTrait}; use orion::numbers::fixed_point::implementations::fp64x64::lut::erf_lut; const ERF_COMPUTATIONAL_ACCURACY: u128 = 100_u128; @@ -22,5 +21,6 @@ fn erf(x: FP64x64) -> FP64x64 { } else { erf_value = ONE; } + FP64x64 { mag: erf_value, sign: x.sign } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/lut.cairo b/src/numbers/fixed_point/implementations/fp64x64/lut.cairo index 34042bf26..23487a032 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/lut.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/lut.cairo @@ -689,5 +689,6 @@ fn 
erf_lut(x: u128) -> u128 { return 18446715997887504384; } } - return ONE; + + ONE } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo index 81ab12895..88bc12d9e 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo @@ -3,26 +3,26 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::{ }; fn max(a: FP8x23, b: FP8x23) -> FP8x23 { - if (a >= b) { - return a; + if a >= b { + a } else { - return b; + b } } fn min(a: FP8x23, b: FP8x23) -> FP8x23 { - if (a <= b) { - return a; + if a <= b { + a } else { - return b; + b } } fn xor(a: FP8x23, b: FP8x23) -> bool { if (a == FixedTrait::new(0, false) || b == FixedTrait::new(0, false)) && (a != b) { - return true; + true } else { - return false; + false } } @@ -30,9 +30,9 @@ fn or(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); if a == zero && b == zero { - return false; + false } else { - return true; + true } } @@ -40,17 +40,17 @@ fn and(a: FP8x23, b: FP8x23) -> bool { let zero = FixedTrait::new(0, false); if a == zero || b == zero { - return false; + false } else { - return true; + true } } fn where(a: FP8x23, b: FP8x23, c: FP8x23) -> FP8x23 { if a == FixedTrait::new(0, false) { - return c; + c } else { - return b; + b } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo index 60b98877a..c347d9817 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo @@ -22,9 +22,9 @@ fn add(a: FP8x23, b: FP8x23) -> FP8x23 { } if (a.mag > b.mag) { - return FixedTrait::new(a.mag - b.mag, a.sign); + FixedTrait::new(a.mag - b.mag, a.sign) } else { - return FixedTrait::new(b.mag - a.mag, b.sign); + FixedTrait::new(b.mag - a.mag, b.sign) } } @@ -32,13 +32,13 @@ fn ceil(a: FP8x23) -> FP8x23 { let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div + 1, false); + FixedTrait::new_unscaled(div + 1, false) } else if div == 0 { - return FixedTrait::new_unscaled(0, false); + FixedTrait::new_unscaled(0, false) } else { - return FixedTrait::new_unscaled(div, true); + FixedTrait::new_unscaled(div, true) } } @@ -83,9 +83,9 @@ fn exp2(a: FP8x23) -> FP8x23 { } if a.sign { - return FixedTrait::ONE() / res_u; + FixedTrait::ONE() / res_u } else { - return res_u; + res_u } } @@ -97,35 +97,35 @@ fn floor(a: FP8x23) -> FP8x23 { let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if rem == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new_unscaled(div, false); + FixedTrait::new_unscaled(div, false) } else { - return FixedTrait::new_unscaled(div + 1, true); + FixedTrait::new_unscaled(div + 1, true) } } fn ge(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign); + (a.mag == b.mag) || ((a.mag > b.mag) ^ a.sign) } } fn gt(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return !a.sign; + !a.sign } else { - return (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag > b.mag) ^ a.sign) } } fn le(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag == b.mag) || ((a.mag < b.mag) 
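// same-sign case: magnitude order reverses for negatives, hence the xor,
// e.g. le(-3, -2) holds because (3 < 2) = false is flipped by the sign bit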
^ a.sign); + (a.mag == b.mag) || ((a.mag < b.mag) ^ a.sign) } } @@ -152,7 +152,7 @@ fn log2(a: FP8x23) -> FP8x23 { let (msb, div) = lut::msb(whole); if a.mag == div * ONE { - return FixedTrait::new_unscaled(msb, false); + FixedTrait::new_unscaled(msb, false) } else { let norm = a / FixedTrait::new_unscaled(div, false); let r8 = FixedTrait::new(76243, true) * norm; @@ -163,7 +163,8 @@ fn log2(a: FP8x23) -> FP8x23 { let r3 = (r4 + FixedTrait::new(77896489, false)) * norm; let r2 = (r3 + FixedTrait::new(83945943, true)) * norm; let r1 = (r2 + FixedTrait::new(68407458, false)) * norm; - return r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false); + + r1 + FixedTrait::new(28734280, true) + FixedTrait::new_unscaled(msb, false) } } @@ -175,9 +176,9 @@ fn log10(a: FP8x23) -> FP8x23 { fn lt(a: FP8x23, b: FP8x23) -> bool { if a.sign != b.sign { - return a.sign; + a.sign } else { - return (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign); + (a.mag != b.mag) && ((a.mag < b.mag) ^ a.sign) } } @@ -194,11 +195,11 @@ fn ne(a: @FP8x23, b: @FP8x23) -> bool { fn neg(a: FP8x23) -> FP8x23 { if a.mag == 0 { - return a; + a } else if !a.sign { - return FixedTrait::new(a.mag, !a.sign); + FixedTrait::new(a.mag, !a.sign) } else { - return FixedTrait::new(a.mag, false); + FixedTrait::new(a.mag, false) } } @@ -255,9 +256,9 @@ fn round(a: FP8x23) -> FP8x23 { let (div, rem) = integer::u32_safe_divmod(a.mag, integer::u32_as_non_zero(ONE)); if (HALF <= rem) { - return FixedTrait::new_unscaled(div + 1, a.sign); + FixedTrait::new_unscaled(div + 1, a.sign) } else { - return FixedTrait::new_unscaled(div, a.sign); + FixedTrait::new_unscaled(div, a.sign) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo index 0adddc539..2a0db31e2 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo @@ -20,9 +20,9 @@ fn acos(a: FP8x23) -> FP8x23 { let asin_res = asin(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -31,9 +31,9 @@ fn acos_fast(a: FP8x23) -> FP8x23 { let asin_res = asin_fast(asin_arg); if (a.sign) { - return FixedTrait::new(PI, false) - asin_res; + FixedTrait::new(PI, false) - asin_res } else { - return asin_res; + asin_res } } @@ -170,7 +170,7 @@ fn sin_fast(a: FP8x23) -> FP8x23 { let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false)) + FixedTrait::::new(low, false); - return FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0); + FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0) } // Calculates tan(a) with a in radians (fixed point) diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo index bb1c373ea..6b9ea2ef8 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo @@ -3,7 +3,7 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ }; fn max(a: FP8x23W, b: FP8x23W) -> FP8x23W { - if (a >= b) { + if a >= b { a } else { b @@ -11,7 +11,7 @@ fn max(a: FP8x23W, b: FP8x23W) -> FP8x23W { } fn min(a: FP8x23W, b: FP8x23W) -> FP8x23W { - if (a <= b) { + if a <= b { a } else { b diff --git a/src/numbers/fixed_point/utils.cairo b/src/numbers/fixed_point/utils.cairo 
index b680548e5..bed384bb1 100644 --- a/src/numbers/fixed_point/utils.cairo +++ b/src/numbers/fixed_point/utils.cairo @@ -14,7 +14,7 @@ fn felt_sign(a: felt252) -> bool { fn felt_abs(a: felt252) -> felt252 { let a_sign = felt_sign(a); - if (a_sign == true) { + if a_sign { return a * -1; } else { return a * 1; @@ -30,11 +30,11 @@ mod tests { fn test_sign() { let min = -1809251394333065606848661391547535052811553607665798349986546028067936010240; let max = 1809251394333065606848661391547535052811553607665798349986546028067936010240; - assert(felt_sign(min) == true, 'invalid result'); - assert(felt_sign(-1) == true, 'invalid result'); - assert(felt_sign(0) == false, 'invalid result'); - assert(felt_sign(1) == false, 'invalid result'); - assert(felt_sign(max) == false, 'invalid result'); + assert(felt_sign(min), 'invalid result'); + assert(felt_sign(-1), 'invalid result'); + assert(!felt_sign(0), 'invalid result'); + assert(!felt_sign(1), 'invalid result'); + assert(!felt_sign(max), 'invalid result'); } #[test] diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index b9bed234a..dc9c56c4e 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -1,13 +1,8 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use orion::numbers::FP16x16; - -use orion::operators::tensor::{Tensor, TensorTrait}; +use orion::numbers::{FP16x16, FP32x32, FP32x32Impl, FixedTrait}; use orion::numbers::NumberTrait; -use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::nn::{NNTrait, FP16x16NN}; - +use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{Tensor, TensorTrait}; #[derive(Destruct)] struct LinearClassifier { @@ -18,7 +13,6 @@ struct LinearClassifier { post_transform: POST_TRANSFORM, } - #[derive(Copy, Drop)] enum POST_TRANSFORM { NONE, @@ -154,7 +148,7 @@ impl LinearClassifierImpl< > of LinearClassifierTrait { fn predict(ref self: LinearClassifier, X: Tensor) -> (Span, Tensor) { let n: usize = self.coefficients.len() / *(X.shape).at(1); - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(n); shape.append(*(X.shape).at(1)); @@ -178,7 +172,7 @@ impl LinearClassifierImpl< Option::None => { (0, ArrayTrait::::new().span()) }, }; if *coefficients.shape.at(1) == 1 && n_classes == 2 { - let mut new_scores = ArrayTrait::new(); + let mut new_scores = array![]; loop { match scores.data.pop_front() { @@ -189,6 +183,7 @@ impl LinearClassifierImpl< Option::None => { break; }, } }; + scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span()); } // Post Transform @@ -201,7 +196,7 @@ impl LinearClassifierImpl< }; // Labels - let mut labels_list = ArrayTrait::new(); + let mut labels_list = array![]; if *scores.shape.at(1) > 1 { let mut labels = scores.argmax(1, Option::None, Option::None); loop { @@ -214,54 +209,46 @@ impl LinearClassifierImpl< let mut i = 0; match self.post_transform { POST_TRANSFORM::NONE => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::zero() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::SOFTMAX => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if 
*scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::LOGISTIC => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, POST_TRANSFORM::SOFTMAXZERO => { - loop { - if i == scores.data.len() { - break; - } + while i != scores.data.len() { if *scores.data.at(i) >= NumberTrait::half() { labels_list.append(*classlabels[0]); } else { labels_list.append(0); } + i += 1; }; }, @@ -273,12 +260,11 @@ impl LinearClassifierImpl< } } - fn max(a: usize, b: usize) -> usize { if a > b { - return a; + a } else { - return b; + b } } diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 75e461729..92ea06edb 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -1,18 +1,10 @@ -use core::array::ArrayTrait; -use core::clone::Clone; -use core::traits::Into; -use core::array::SpanTrait; -use core::dict::Felt252DictTrait; -use core::dict::Felt252DictEntryTrait; -use orion::numbers::FP16x16; +use core::debug::PrintTrait; -use orion::operators::tensor::{Tensor, TensorTrait}; +use orion::numbers::{FP16x16, FP32x32, FP32x32Impl, FixedTrait}; use orion::numbers::NumberTrait; -use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; - -use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; +use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{Tensor, TensorTrait}; #[derive(Destruct)] struct LinearRegressor { @@ -191,7 +183,7 @@ impl LinearRegressorImpl< > of LinearRegressorTrait { fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor { let n: usize = self.coefficients.len() / self.target; - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(self.target); shape.append(n); let mut coefficients = TensorTrait::new(shape.span(), self.coefficients); @@ -201,7 +193,7 @@ impl LinearRegressorImpl< match self.intercepts { Option::Some(intercepts) => { - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(1); shape.append(intercepts.len()); let intercepts = TensorTrait::new(shape.span(), intercepts); diff --git a/src/operators/ml/svm/core.cairo b/src/operators/ml/svm/core.cairo index 156fea8ee..365cb0c1b 100644 --- a/src/operators/ml/svm/core.cairo +++ b/src/operators/ml/svm/core.cairo @@ -1,13 +1,8 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::traits::Into; use orion::numbers::NumberTrait; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; use orion::utils::get_row; #[derive(Copy, Drop)] @@ -18,7 +13,6 @@ enum KERNEL_TYPE { SIGMOID, } - fn kernel_dot< T, MAG, @@ -51,7 +45,8 @@ fn kernel_dot< NumberTrait::tanh(s) }, }; - return s; + + s } @@ -62,15 +57,12 @@ fn sv_dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { 
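// accumulate the inner product sum(pA[i] * pB[i]); e.g. [1, 2] . [3, 4] = 11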
sum = sum + *pA.at(i) * *pB.at(i); i += 1; }; - return sum; + sum } fn squared_diff< @@ -89,12 +81,10 @@ fn squared_diff< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); i += 1; }; - return sum; + + sum } diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo index fcaee16e3..4c7e8b3d2 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -1,21 +1,15 @@ -use core::array::ArrayTrait; use orion::numbers::NumberTrait; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FP64x64, FP64x64Impl, FixedTrait}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; +use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; +use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; +use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; - use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; -use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; - -use orion::numbers::{FP64x64, FP64x64Impl}; -use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; -use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; use orion::utils::get_row; -use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; - - #[derive(Copy, Drop, Destruct)] struct SVMClassifier { classlabels: Span, @@ -30,7 +24,6 @@ struct SVMClassifier { vectors_per_class: Option>, } - #[derive(Copy, Drop)] enum POST_TRANSFORM { NONE, @@ -40,14 +33,12 @@ enum POST_TRANSFORM { PROBIT, } - #[derive(Copy, Drop)] enum MODE { SVM_LINEAR, SVM_SVC, } - /// /// predict - Returns the top class for each of N inputs. 
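/// The accompanying scores tensor is assembled with one row per input row of X,
/// i.e. shape [N, final_scores.len() / N], after any post-transform is applied.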
trait SVMClassifierTrait { @@ -250,7 +241,6 @@ trait SVMClassifierTrait { fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor); } - impl SVMClassifierImpl< T, MAG, @@ -272,19 +262,17 @@ impl SVMClassifierImpl< fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor) { let mut vector_count_ = 0; let class_count_ = max(self.classlabels.len(), 1); - let mut starting_vector_ = ArrayTrait::new(); + let mut starting_vector_: Array = array![]; let (vectors_per_class_, starting_vector_) = match self.vectors_per_class { Option::Some(vectors_per_class) => { let mut i = 0; - loop { - if i == vectors_per_class.len() { - break; - } + while i != vectors_per_class.len() { starting_vector_.append(vector_count_); vector_count_ += *vectors_per_class.at(i); i += 1; }; + (vectors_per_class, starting_vector_.span()) }, Option::None => { (array![].span(), array![].span()) }, @@ -320,22 +308,17 @@ impl SVMClassifierImpl< // SVM let (res, votes) = match mode { MODE::SVM_LINEAR => { - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let scores = run_linear(ref self, x_n, coefs, class_count_, kernel_type_); let mut i = 0; - loop { - if i == scores.len() { - break; - } + while i != scores.len() { res.append(*scores.at(i)); i += 1; }; + n += 1; }; @@ -345,13 +328,10 @@ impl SVMClassifierImpl< ) }, MODE::SVM_SVC => { - let mut res = ArrayTrait::new(); - let mut votes = ArrayTrait::new(); + let mut res: Array = array![]; + let mut votes: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut x_n = get_row(@X, n); let (scores, mut vote) = run_svm( ref self, @@ -365,21 +345,17 @@ impl SVMClassifierImpl< vectors_per_class_ ); let mut i = 0; - loop { - if i == scores.len() { - break; - } + while i != scores.len() { res.append(*scores.at(i)); i += 1; }; + let mut i = 0; - loop { - if i == vote.len() { - break; - } + while i != vote.len() { votes.append(vote.at(i)); i += 1; }; + n += 1; }; @@ -400,20 +376,14 @@ impl SVMClassifierImpl< MODE::SVM_LINEAR => { (res, false) }, MODE::SVM_SVC => { let (scores, has_proba) = if self.prob_a.len() > 0 { - let mut scores = ArrayTrait::new(); + let mut scores: Array = array![]; let mut n = 0; - loop { - if n == *res.shape.at(0) { - break; - } + while n != *res.shape.at(0) { let res_n = get_row(@res, n); let mut s = probablities(ref self, res_n, class_count_); let mut i = 0; - loop { - if i == s.len() { - break; - } + while i != s.len() { scores.append(s.at(i)); i += 1; }; @@ -430,19 +400,17 @@ impl SVMClassifierImpl< } else { (res, false) }; + (scores, has_proba) }, }; // Finalization - let mut labels = ArrayTrait::new(); - let mut final_scores = ArrayTrait::new(); + let mut labels: Array = array![]; + let mut final_scores: Array = array![]; let mut n = 0; - loop { - if n == *scores.shape.at(0) { - break; - } + while n != *scores.shape.at(0) { let mut scores_n = get_row(@scores, n); match votes { Option::Some(votes) => { @@ -455,14 +423,13 @@ impl SVMClassifierImpl< has_proba, self.classlabels ); + let mut i = 0; - loop { - if i == new_scores.data.len() { - break; - } + while i != new_scores.data.len() { final_scores.append(*new_scores.data.at(i)); i += 1; }; + labels.append(label); }, Option::None => { @@ -474,32 +441,31 @@ impl SVMClassifierImpl< has_proba, self.classlabels ); + let mut i = 0; - loop { - if i == new_scores.data.len() { - break; - } + while i != 
new_scores.data.len() { final_scores.append(*new_scores.data.at(i)); i += 1; }; + labels.append(label); }, } + n += 1; }; + let labels = labels.span(); // Labels if self.classlabels.len() > 0 { - let mut class_labels = ArrayTrait::new(); + let mut class_labels: Array = array![]; let mut i = 0; - loop { - if i == labels.len() { - break; - } + while i != labels.len() { class_labels.append(*self.classlabels.at(*labels.at(i))); i += 1; }; + return ( class_labels.span(), TensorTrait::new( @@ -508,17 +474,17 @@ impl SVMClassifierImpl< ) ); } - return ( + + ( labels, TensorTrait::new( array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), final_scores.span() ) - ); + ) } } - fn run_svm< T, MAG, @@ -544,13 +510,10 @@ fn run_svm< vectors_per_class_: Span ) -> (Array, NullableVec) { let mut evals = 0; - let mut kernels = ArrayTrait::new(); + let mut kernels: Array = array![]; let mut j = 0; - loop { - if j == vector_count_ { - break; - } + while j != vector_count_ { let sv_j = get_row(@sv, j); kernels.append(kernel_dot(self.kernel_params, X, sv_j, kernel)); j += 1; @@ -558,25 +521,17 @@ fn run_svm< let kernels = kernels.span(); - let mut scores = ArrayTrait::new(); - + let mut scores: Array = array![]; let mut votes = VecTrait::new(); VecTrait::set(ref votes, class_count_ - 1, NumberTrait::zero()); let mut i = 0; - loop { - if i == class_count_ { - break; - } - + while i != class_count_ { let si_i = *starting_vector_.at(i); let class_i_sc = *vectors_per_class_.at(i); let mut j = i + 1; - loop { - if j == class_count_ { - break; - } + while j != class_count_ { let si_j = *starting_vector_.at(j); let class_j_sc = *vectors_per_class_.at(j); @@ -606,12 +561,15 @@ fn run_svm< } else { VecTrait::set(ref votes, j, VecTrait::at(ref votes, j) + NumberTrait::one()); } + evals += 1; j += 1; }; + i += 1; }; - return (scores, votes); + + (scores, votes) } fn run_linear< @@ -633,14 +591,10 @@ fn run_linear< class_count_: usize, kernel: KERNEL_TYPE ) -> Array { - let mut scores = ArrayTrait::new(); + let mut scores: Array = array![]; let mut j = 0; - loop { - if j == class_count_ { - break; - } - + while j != class_count_ { let coefs_j = get_row(@coefs, j); let d = kernel_dot(self.kernel_params, X, coefs_j, kernel); @@ -650,9 +604,9 @@ fn run_linear< scores.append(score); j += 1; }; - return scores; -} + scores +} fn compute_final_scores< T, @@ -678,7 +632,6 @@ fn compute_final_scores< has_proba: bool, classlabels: Span ) -> (usize, Tensor) { - let (max_class, max_weight) = if votes.len() > 0 { let max_class = argmax_span(votes); let max_weight = *votes.at(max_class); @@ -708,7 +661,7 @@ fn compute_final_scores< write_additional_scores ); - return (label, new_scores); + (label, new_scores) } fn write_scores< @@ -725,7 +678,6 @@ fn write_scores< >( n_classes: usize, scores: Tensor, post_transform: POST_TRANSFORM, add_second_class: usize ) -> Tensor { - let new_scores = if n_classes >= 2 { let new_scores = match post_transform { POST_TRANSFORM::NONE => scores, @@ -750,6 +702,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::SOFTMAX => { @@ -769,6 +722,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::LOGISTIC => { @@ -787,6 +741,7 @@ fn write_scores< } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::SOFTMAXZERO => { @@ -806,13 +761,15 @@ fn write_scores< } else { 
TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) }; + scores }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not applicable here.'), }; new_scores }; - return new_scores; + + new_scores } fn set_score_svm< @@ -835,29 +792,29 @@ fn set_score_svm< return (*classlabels.at(1), write_additional_scores); }; }; + return (*classlabels.at(maxclass), write_additional_scores); } if max_weight >= NumberTrait::zero() { return (posclass, write_additional_scores); }; - return (negclass, write_additional_scores); + + (negclass, write_additional_scores) } fn argmax_span, +Copy, +PartialOrd,>(span: Span) -> usize { let mut max = 0; let mut i = 0; - loop { - if i == span.len() { - break; - } + while i != span.len() { if *span.at(i) > *span.at(max) { max = i; } + i += 1; }; - return max; -} + max +} fn probablities< T, @@ -880,15 +837,9 @@ fn probablities< let mut probsp2: MutMatrix = MutMatrixImpl::new(class_count_, class_count_); let mut index = 0; let mut i = 0; - loop { - if i == class_count_ { - break; - } + while i != class_count_ { let mut j = i + 1; - loop { - if j == class_count_ { - break; - } + while j != class_count_ { let val1 = sigmoid_probability( *scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index) ); @@ -896,15 +847,18 @@ fn probablities< let mut val2 = NumberTrait::min( val1, NumberTrait::one() ); // ONNX : min(val2, (1 - 1.0e-7)) + probsp2.set(i, j, val2); probsp2.set(j, i, NumberTrait::one() - val2); j += 1; index += 1; }; + i += 1; }; - return multiclass_probability(class_count_, ref probsp2); + + multiclass_probability(class_count_, ref probsp2) } fn multiclass_probability< @@ -937,85 +891,64 @@ fn multiclass_probability< let eps = (NumberTrait::half() / NumberTrait::new_unscaled(a.into(), false)) / k_fp; let mut t = 0; - loop { - if t == k { - break; - } + while t != k { VecTrait::set(ref P, t, NumberTrait::one() / k_fp); let mut i = 0; let mut acc1 = NumberTrait::zero(); - loop { - if i == t { - break; - } + while i != t { let r_i = MutMatrixImpl::at(ref R, i, t); acc1 += r_i * r_i; i += 1; }; + MutMatrixImpl::set(ref Q, t, t, acc1); let mut i = 0; - loop { - if i == t { - break; - } + while i != t { MutMatrixImpl::set(ref Q, t, i, MutMatrixImpl::at(ref Q, i, t)); i += 1; }; let mut i = t + 1; let mut acc2 = NumberTrait::zero(); - loop { - if i == k { - break; - } + while i != k { let r_i = MutMatrixImpl::at(ref R, i, t); acc2 += r_i * r_i; i += 1; }; + MutMatrixImpl::set(ref Q, t, t, acc1 + acc2); let mut i = t + 1; let mut acc = NumberTrait::zero(); - loop { - if i == k { - break; - } + while i != k { acc += -MutMatrixImpl::at(ref R, i, t) * MutMatrixImpl::at(ref R, t, i); i += 1; }; let mut i = t + 1; - loop { - if i == k { - break; - } + while i != k { MutMatrixImpl::set(ref Q, t, i, acc); i += 1; }; + t += 1; }; let mut i = 0; - loop { - if i == max_iter { - break; - } - + while i != max_iter { let mut Qp = MutMatrixImpl::matrix_vector_product(ref Q, ref P); let mut pQp = dot(ref P, ref Qp); let mut max_error = NumberTrait::zero(); let mut t = 0; - loop { - if t == k { - break; - } + while t != k { let error = NumberTrait::abs(Qp.at(t) - pQp); if error > max_error { max_error = error; } + t += 1; }; @@ -1024,11 +957,7 @@ fn multiclass_probability< } let mut t = 0; - loop { - if t == k { - break; - } - + while t != k { let diff = (-VecTrait::at(ref Qp, t) + pQp) / MutMatrixImpl::at(ref Q, t, t); VecTrait::set(ref P, t, VecTrait::at(ref P, t) + diff); @@ -1045,9 +974,11 @@ fn multiclass_probability< t += 1; }; + i += 1; }; - return P; 
+ + P } /// Computation of the matrix Qb in the multiclass_probability computation @@ -1071,10 +1002,7 @@ fn Qp_computation< let m = Qp.len; let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { let elem = (VecTrait::at(ref Qp, i) + diff * MutMatrixImpl::at(ref Q, t, i)) / (NumberTrait::one() + diff); @@ -1083,7 +1011,6 @@ fn Qp_computation< }; } - fn sigmoid_probability< T, MAG, @@ -1110,14 +1037,14 @@ fn sigmoid_probability< v }; - return NumberTrait::one() - v; + NumberTrait::one() - v } - fn max(a: usize, b: usize) -> usize { if a > b { return a; }; + b } @@ -1125,18 +1052,16 @@ fn min, +Drop, +PartialOrd,>(a: Span) -> T { let mut min = *a.at(0); let mut i = 0; - loop { - if i == a.len() { - break; - } + while i != a.len() { if min > *a.at(i) { min = *a.at(i); } + i += 1; }; - return min; -} + min +} fn dot_start_end< T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, @@ -1146,19 +1071,15 @@ fn dot_start_end< let mut sum = NumberTrait::zero(); let mut index_a = a_start; let mut index_b = b_start; - loop { - if index_a == a_end || index_b == b_end { - break; - } + while index_a != a_end && index_b != b_end { sum = sum + *pA.at(index_a) * *pB.at(index_b); index_a += 1; index_b += 1; }; - return sum; + sum } - fn sv_dot< T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, >( @@ -1166,15 +1087,12 @@ fn sv_dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + *pA.at(i) * *pB.at(i); i += 1; }; - return sum; + sum } fn squared_diff< @@ -1193,14 +1111,12 @@ fn squared_diff< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); i += 1; }; - return sum; + + sum } fn dot, +Copy, +NumberTrait, +Mul, +AddEq, +Add, +Div>( @@ -1210,14 +1126,12 @@ fn dot, +Copy, +NumberTrait, +Mul, +AddEq, +Ad let n = self.len; let mut sum: T = NumberTrait::zero(); let mut i = 0_usize; - loop { - if i == n { - break (); - } + while i != n { sum += self.at(i) * vec.at(i); i += 1; }; - return sum; + + sum } fn div_element_wise, +Add, +Div, +NumberTrait, +Drop, +Copy>( @@ -1226,10 +1140,7 @@ fn div_element_wise, +Add, +Div, +NumberTrait, +Dr let m = self.len; let mut i = 0_usize; - loop { - if i == m { - break (); - } + while i != m { VecTrait::set(ref self, i, VecTrait::at(ref self, i) / elem); i += 1; }; diff --git a/src/operators/ml/svm/svm_regressor.cairo b/src/operators/ml/svm/svm_regressor.cairo index be76931e9..433c0e78d 100644 --- a/src/operators/ml/svm/svm_regressor.cairo +++ b/src/operators/ml/svm/svm_regressor.cairo @@ -1,18 +1,14 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::traits::Into; +use core::debug::PrintTrait; + use orion::numbers::NumberTrait; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; +use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; -use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::utils::get_row; -use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; - #[derive(Copy, Drop, Destruct)] struct SVMRegressor { 
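    // Attributes follow the ONNX SVMRegressor operator; `coefficients` stores the learned support-vector coefficients.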
coefficients: Span, @@ -195,12 +191,9 @@ impl SVMRegressorImpl< (mode_, kernel_type_, sv) }; - let mut z = ArrayTrait::new(); + let mut z: Array = array![]; let mut n = 0; - loop { - if n == *X.shape.at(0) { - break; - } + while n != *X.shape.at(0) { let mut s = NumberTrait::zero(); match mode_ { MODE::SVM_LINEAR => { @@ -211,15 +204,13 @@ impl SVMRegressorImpl< MODE::SVM_SVC => { let mut x_n = get_row(@X, n); let mut j = 0; - loop { - if j == self.n_supports { - break; - } + while j != self.n_supports { let mut sv_j = get_row(@sv, j); let d = kernel_dot(self.kernel_params, x_n, sv_j, kernel_type_); s += *self.coefficients.at(j) * d; j += 1; }; + s += *self.rho.at(0); }, } @@ -233,11 +224,13 @@ impl SVMRegressorImpl< } else { z.append(s); }; + n += 1; }; // Post Transform let mut score = TensorTrait::new(array![*X.shape.at(0)].span(), z.span()); + score = match self.post_transform { POST_TRANSFORM::NONE => score, POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1), @@ -246,7 +239,7 @@ impl SVMRegressorImpl< POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; - return score; + score } } diff --git a/src/operators/ml/tree_ensemble/core.cairo b/src/operators/ml/tree_ensemble/core.cairo index 08b4b6ef6..09b0f3937 100644 --- a/src/operators/ml/tree_ensemble/core.cairo +++ b/src/operators/ml/tree_ensemble/core.cairo @@ -1,12 +1,10 @@ -use core::array::ArrayTrait; use alexandria_data_structures::array_ext::SpanTraitExt; -use orion::numbers::NumberTrait; -use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use orion::utils::get_row; - use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::ArrayTraitExt; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; +use orion::utils::get_row; #[derive(Copy, Drop, Destruct)] struct TreeEnsembleAttributes { @@ -39,7 +37,6 @@ enum NODE_MODES { LEAF } - #[generate_trait] impl TreeEnsembleImpl< T, MAG, +Drop, +Copy, +NumberTrait, +PartialOrd, +PartialEq @@ -91,18 +88,15 @@ impl TreeEnsembleImpl< index } + fn leave_index_tree(ref self: TreeEnsemble, x: Tensor) -> Tensor { - let mut outputs = ArrayTrait::new(); + let mut outputs: Array = array![]; let mut i: usize = 0; let breaker: usize = *x.shape[0]; - loop { - if i == breaker { - break; - } - + while i != breaker { let row_data: Span = get_row(@x, i); - let mut outs = ArrayTrait::new(); + let mut outs: Array = array![]; let mut tree_ids = self.tree_ids; loop { match tree_ids.pop_front() { @@ -115,6 +109,7 @@ impl TreeEnsembleImpl< Option::None => { break; } }; }; + outputs.append_all(ref outs); i += 1; }; diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index ab073a5b5..f0f5430de 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -1,27 +1,15 @@ -use core::array::ArrayTrait; -use core::clone::Clone; -use core::box::BoxTrait; -use core::traits::Into; -use core::option::OptionTrait; -use orion::operators::matrix::MutMatrixTrait; -use core::array::SpanTrait; -use core::nullable::NullableTrait; -use core::dict::Felt252DictTrait; -use core::dict::Felt252DictEntryTrait; +use core::debug::PrintTrait; use core::nullable::{match_nullable, FromNullableResult}; -use orion::operators::tensor::{Tensor, TensorTrait}; -use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, 
TreeEnsembleImpl, TreeEnsembleTrait}; -use orion::numbers::NumberTrait; -use orion::utils::get_row; - use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::{SpanTraitExt}; -use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; +use orion::numbers::NumberTrait; +use orion::operators::matrix::{MutMatrix, MutMatrixTrait, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; - -use core::debug::PrintTrait; +use orion::operators::tensor::{Tensor, TensorTrait}; +use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait}; +use orion::utils::get_row; #[derive(Destruct)] struct TreeEnsembleClassifier { @@ -276,17 +264,9 @@ impl TreeEnsembleClassifierImpl< if self.base_values.is_some() { let mut base_values = self.base_values.unwrap(); let mut row: usize = 0; - loop { - if row == res.rows { - break; - } - + while row != res.rows { let mut col: usize = 0; - loop { - if col == res.cols { - break; - } - + while col != res.cols { let value = *base_values.pop_front().unwrap(); res.set(row, col, value); @@ -297,17 +277,9 @@ impl TreeEnsembleClassifierImpl< } } else { let mut row: usize = 0; - loop { - if row == res.rows { - break; - } - + while row != res.rows { let mut col: usize = 0; - loop { - if col == res.cols { - break; - } - + while col != res.cols { res.set(row, col, NumberTrait::zero()); col += 1 @@ -319,11 +291,7 @@ impl TreeEnsembleClassifierImpl< let mut class_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; - loop { - if i == self.class_treeids.len() { - break; - } - + while i != self.class_treeids.len() { let tid = *self.class_treeids[i]; let nid = *self.class_nodeids[i]; @@ -342,12 +310,9 @@ impl TreeEnsembleClassifierImpl< i += 1; }; - let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + let mut i: usize = 0; + while i != res.rows { let mut indices = get_row(@leaves_index, i); let mut t_index: Array> = ArrayTrait::new(); loop { @@ -398,6 +363,7 @@ impl TreeEnsembleClassifierImpl< Option::None => { break; } }; }; + i += 1; }; @@ -411,10 +377,8 @@ impl TreeEnsembleClassifierImpl< Option::Some(c_id) => { class_id = *c_id; }, Option::None => { class_id = 0; } }; - loop { - if i == self.class_ids.len() { - break; - } + + while i != self.class_ids.len() { match class_ids.pop_front() { Option::Some(c_id) => { if *c_id == class_id { @@ -433,24 +397,20 @@ impl TreeEnsembleClassifierImpl< if binary { let mut new_res: MutMatrix = MutMatrixImpl::new(res.rows, res.cols); let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + while i != res.rows { // Exchange match res.get(i, 0) { Option::Some(res_0) => { new_res.set(i, 1, res_0); }, Option::None => { new_res.set(i, 1, NumberTrait::zero()); }, }; + i += 1; }; + match self.post_transform { POST_TRANSFORM::NONE => { let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + while i != res.rows { // Exchange match new_res.get(i, 1) { Option::Some(res_1) => { @@ -459,57 +419,49 @@ impl TreeEnsembleClassifierImpl< }, Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; + i += 1; }; }, POST_TRANSFORM::SOFTMAX => { let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + while i != res.rows { // Exchange match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; + i += 1; }; }, POST_TRANSFORM::LOGISTIC => { let mut i: usize = 0; - loop { - if i == res.rows { - break; 
- } + while i != res.rows { // Exchange match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; + i += 1; }; }, POST_TRANSFORM::SOFTMAXZERO => { let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + while i != res.rows { // Exchange match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; + i += 1; }; }, POST_TRANSFORM::PROBIT => { let mut i: usize = 0; - loop { - if i == res.rows { - break; - } + while i != res.rows { // Exchange match new_res.get(i, 1) { Option::Some(res_1) => { @@ -518,10 +470,12 @@ impl TreeEnsembleClassifierImpl< }, Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; + i += 1; }; }, }; + res = new_res; } @@ -537,7 +491,7 @@ impl TreeEnsembleClassifierImpl< // Labels let mut labels = new_scores.argmax(1); - let mut labels_list = ArrayTrait::new(); + let mut labels_list: Array = array![]; loop { match labels.pop_front() { Option::Some(i) => { labels_list.append(*self.classlabels[*i]); }, @@ -545,7 +499,7 @@ impl TreeEnsembleClassifierImpl< }; }; - return (labels_list.span(), new_scores); + (labels_list.span(), new_scores) } } diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index 215ad2a96..9e3a31c50 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -1,28 +1,15 @@ -use core::array::ArrayTrait; -use core::clone::Clone; -use core::box::BoxTrait; -use core::traits::Into; -use core::option::OptionTrait; -use orion::operators::matrix::MutMatrixTrait; -use core::array::SpanTrait; -use core::nullable::NullableTrait; -use core::dict::Felt252DictTrait; -use core::dict::Felt252DictEntryTrait; +use core::debug::PrintTrait; use core::nullable::{match_nullable, FromNullableResult}; - -use orion::operators::tensor::{Tensor, TensorTrait}; -use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait}; -use orion::numbers::NumberTrait; -use orion::utils::get_row; - use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::{SpanTraitExt}; -use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; +use orion::numbers::NumberTrait; +use orion::operators::matrix::{MutMatrix, MutMatrixTrait, MutMatrixImpl}; +use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait}; +use orion::operators::tensor::{Tensor, TensorTrait}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; - -use core::debug::PrintTrait; +use orion::utils::get_row; #[derive(Destruct)] struct TreeEnsembleRegressor { @@ -37,7 +24,6 @@ struct TreeEnsembleRegressor { post_transform: POST_TRANSFORM, } - #[derive(Copy, Drop)] enum POST_TRANSFORM { NONE, @@ -258,11 +244,7 @@ impl TreeEnsembleRegressorImpl< let mut target_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; - loop { - if i == self.target_treeids.len() { - break; - } - + while i != self.target_treeids.len() { let tid = *self.target_treeids[i]; let nid = *self.target_nodeids[i]; @@ -283,11 +265,7 @@ impl TreeEnsembleRegressorImpl< }; let mut i: usize = 0; - loop { - if i == res.rows { - break; - } - + while i != res.rows { let mut indices = get_row(@leaves_index, i); let mut t_index: Array> = ArrayTrait::new(); loop { @@ 
-304,6 +282,7 @@ impl TreeEnsembleRegressorImpl< Option::None => { break; } }; }; + let mut t_index = t_index.span(); match self.aggregate_function { @@ -314,6 +293,7 @@ impl TreeEnsembleRegressorImpl< AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref self, ref res, ref t_index, i); }, AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref self, ref res, ref t_index, i); }, }; + i += 1; }; @@ -321,17 +301,9 @@ impl TreeEnsembleRegressorImpl< if self.base_values.is_some() { let mut base_values = self.base_values.unwrap(); let mut row: usize = 0; - loop { - if row == res.rows { - break; - } - + while row != res.rows { let mut col: usize = 0; - loop { - if col == res.cols { - break; - } - + while col != res.cols { let value = *base_values.pop_front().unwrap(); match res.get(row, col) { Option::Some(val) => { res.set(row, col, val + value); }, @@ -354,11 +326,10 @@ impl TreeEnsembleRegressorImpl< POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; - return new_scores; + new_scores } } - fn compute_res_SUM< T, MAG, @@ -491,10 +462,7 @@ fn compute_res_MIN< i: usize ) { let mut j = 0; - loop { - if j == res.cols { - break; - } + while j != res.cols { res.set(i, j, NumberTrait::max_value()); j += 1; }; @@ -528,7 +496,6 @@ fn compute_res_MIN< }; } - fn compute_res_MAX< T, MAG, @@ -551,10 +518,7 @@ fn compute_res_MAX< i: usize ) { let mut j = 0; - loop { - if j == res.cols { - break; - } + while j != res.cols { res.set(i, j, NumberTrait::min_value()); j += 1; }; diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 1f1aa0d48..4f9cfc1a8 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -1,8 +1,7 @@ use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; - fn col2im, +NumberTrait, +Copy, +Drop, +Add, +Mul,>( data: @Tensor, @@ -15,15 +14,13 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; @@ -31,31 +28,27 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let pads = match pads { Option::Some(pads) => pads, Option::None => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { pads.append(0); pads.append(0); i += 1; }; + pads.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -65,28 +58,20 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let mut new_shape = array![*(*data).shape.at(0), C, bl]; let mut i = 2; - loop { - if i == (*data).shape.len() { - break; - } + while i != (*data).shape.len() { new_shape.append(*(*data).shape.at(i)); i += 1; }; + let data = data.reshape(new_shape.span()); - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let data_stride = 
stride(data.shape); let mut n = 0; - loop { - if n == *data.shape.at(0) { - break; - } + while n != *data.shape.at(0) { let mut c = 0; - loop { - if c == *data.shape.at(1) { - break; - } + while c != *data.shape.at(1) { let data_n_c = TensorTrait::new( SpanTrait::slice(data.shape, 2, data.shape.len() - 2), SpanTrait::slice( @@ -97,29 +82,25 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad @data_n_c, image_shape, block_shape, dilations, pads, strides ); let mut i = 0; - loop { - if i == out.len() { - break; - } + while i != out.len() { res.append(out.at(i)); i += 1; }; + c += 1; }; + n += 1; }; let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { new_shape.append(*image_shape.at(i)); i += 1; }; - return TensorTrait::new(new_shape.span(), res.span()); + TensorTrait::new(new_shape.span(), res.span()) } fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { @@ -145,12 +126,9 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let mut dim_col = ArrayTrait::new(); + let mut dim_col: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { dim_col .append( (*image_shape.at(i) @@ -162,6 +140,7 @@ fn col2im_naive_implementation< i += 1; }; + let dim_col = dim_col.span(); let stride_img = stride(image_shape); @@ -172,24 +151,15 @@ fn col2im_naive_implementation< let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0); let mut c_col = 0; - loop { - if c_col == kernel_size { - break; - } + while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; - loop { - if col == col_size { - break; - } + while col != col_size { let ind_col = get_indices(col, dim_col).span(); - let mut ind_im = ArrayTrait::new(); + let mut ind_im: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) @@ -206,25 +176,26 @@ fn col2im_naive_implementation< i += 1; }; + let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } + col += 1; }; + c_col += 1; }; - return data_im; + data_im } fn col2im_shape_check, +Copy, +Drop,>( @@ -243,13 +214,10 @@ fn col2im_shape_check, +Copy, +Drop,>( let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); - let mut n_blocks = ArrayTrait::new(); + let mut n_blocks: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { n_blocks .append( (*output_shape.at(i) @@ -267,15 +235,11 @@ fn col2im_shape_check, +Copy, +Drop,>( assert(input_length == block_size, 'input_length != block_size'); } - fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut k = shape.len() - 1; - loop { - if k == 0 { - break; - } + while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; @@ -283,17 +247,15 @@ fn get_indices(index: usize, shape: Span,) -> Array { k -= 1; }; - let mut new_res = ArrayTrait::new(); + let mut new_res: Array = array![]; new_res.append(i); let mut i = shape.len() - 1; - 
loop { - if i == 0 { - break; - } + while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; - return new_res; + + new_res } fn is_out(ind: Span, shape: Span,) -> bool { @@ -312,7 +274,8 @@ fn is_out(ind: Span, shape: Span,) -> bool { } n += 1; }; - return is_out; + + is_out } fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( @@ -320,12 +283,10 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul< ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; + + prod } diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index 926bcb2b5..ac72c336d 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -1,14 +1,10 @@ -use core::traits::Into; -use core::traits::IndexView; -use core::array::ArrayTrait; +use core::debug::PrintTrait; + use orion::numbers::NumberTrait; use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; use orion::operators::tensor::core::{stride}; -use core::clone::Clone; - -use core::debug::PrintTrait; #[derive(Copy, Drop)] enum AUTO_PAD { @@ -41,66 +37,59 @@ fn conv< strides: Option>, ) -> Tensor { let nd = (*X).shape.len() - 2; - + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + kernel_shape.span() }, }; let pads = match pads { Option::Some(pads) => pads, Option::None => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { pads.append(0); pads.append(0); i += 1; }; + pads.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -117,8 +106,8 @@ fn conv< if group > 1 { let sN = *(*X).shape.at(0); - let mut res_b = ArrayTrait::new(); - let mut res_cv = ArrayTrait::new(); + let mut res_b: Array = array![]; + let mut res_cv = array![]; let mut td = 0; let mg = *(*W).shape.at(0) / group; @@ -127,37 +116,27 @@ fn conv< let X_stride = stride((*X).shape); let mut gx_shape = array![1, dw]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { gx_shape.append(*(*X).shape.at(i)); i += 1; }; + let gx_shape = gx_shape.span(); let W_stride = stride((*W).shape); let mut gw_shape = array![mg]; let mut i = 1; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { gw_shape.append(*(*W).shape.at(i)); i += 1; }; + let gw_shape = 
gw_shape.span(); let mut b = 0; - loop { - if b == sN { - break; - } + while b != sN { let mut g = 0; - loop { - if g == group { - break; - } + while g != group { let gx = TensorTrait::new( gx_shape, SpanTrait::slice( @@ -181,13 +160,16 @@ fn conv< Option::Some(pads), Option::Some(strides) ); + if b == 0 { td += *cv.shape.at(1); } + res_b.append(b); res_cv.append(cv); g += 1; }; + b += 1; }; @@ -199,61 +181,47 @@ fn conv< let mut cv = *res_cv.at(0); let mut i = 2; - loop { - if i == cv.shape.len() { - break; - } + while i != cv.shape.len() { final_shape.append(*cv.shape.at(i)); i += 1; }; + let final_shape = final_shape.span(); - let mut final = ArrayTrait::new(); + let mut final: Array = array![]; let mut p = 0; let mut i = 0; - loop { - if i == res_b.len() { - break; - } + while i != res_b.len() { let cv = *res_cv.at(i); let mut n = 0; - loop { - if n == cv.data.len() { - break; - } + while n != cv.data.len() { final.append(*cv.data.at(n)); n += 1; }; + p += *cv.shape.at(1); if p >= td { p = 0; } + i += 1; }; + let final = final.span(); let final = match B { Option::Some(B) => { - let mut final_b = ArrayTrait::new(); + let mut final_b: Array = array![]; let final_stride = stride(final_shape); let mut i = 0; - loop { - if i == *final_shape.at(0) { - break; - } + while i != *final_shape.at(0) { let mut j = 0; - loop { - if j == B.len() { - break; - } + while j != B.len() { let mut k = 0; - loop { - if k == *final_stride.at(1) { - break; - } + while k != *final_stride.at(1) { final_b .append( *final.at(i * *final_stride.at(0) + j * *final_stride.at(1) + k) @@ -261,10 +229,13 @@ fn conv< ); k += 1; }; + j += 1; }; + i += 1; }; + final_b.span() }, Option::None => { final }, @@ -277,37 +248,32 @@ fn conv< if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) { // computation of the dilated kernel let nd = dilations.len(); - let mut new_kernel_shape = ArrayTrait::new(); - let mut new_shape = ArrayTrait::new(); + let mut new_kernel_shape: Array = array![]; + let mut new_shape: Array = array![]; new_shape.append_span(SpanTrait::slice((*W).shape, 0, (*W).shape.len() - nd)); let mut i = 0; - loop { - if i == dilations.len() { - break; - } + while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; new_shape.append(*(*W).shape.at(di) + (*(*W).shape.at(di) - 1) * (d - 1)); new_kernel_shape.append(*kernel_shape.at(i) + (*kernel_shape.at(i) - 1) * (d - 1)); i += 1; }; + let new_shape = new_shape.span(); let new_w_strides = stride(new_shape); let mut new_w = NullableVecImpl::new(); new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); - let mut indices = ArrayTrait::new(); + let mut indices = array![]; indices.append(arange(0, *new_shape.at(0), 1)); indices.append(arange(0, *new_shape.at(1), 1)); let mut i = 0; - loop { - if i == dilations.len() { - break; - } + while i != dilations.len() { let d = *dilations.at(i); let di = (*W).shape.len() - nd + i; indices.append(arange(0, *new_shape.at(di), d)); @@ -316,35 +282,28 @@ fn conv< let set_of_all_indices = cartesian(indices.span()); - let mut new_w_arr = ArrayTrait::new(); + let mut new_w_arr: Array = array![]; let mut i = 0; let mut prev = 0; - loop { - if i == (*W).data.len() { - break; - } + while i != (*W).data.len() { let nd_index = *set_of_all_indices.at(i); let mut flatten_index = 0; let mut j = 0; - loop { - if j == nd_index.len() { - break; - } + while j != nd_index.len() { flatten_index += *nd_index.at(j) * *new_w_strides.at(j); j += 1; }; if flatten_index > prev + 1 
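                // a dilated kernel leaves gaps between consecutive flattened indices; the gap positions are filled with zeros below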
{ let mut j = prev + 1; - loop { - if j == flatten_index { - break; - } + while j != flatten_index { new_w_arr.append(NumberTrait::zero()); + j += 1; }; } + new_w_arr.append(*(*W).data.at(i)); new_w.set(flatten_index, *(*W).data.at(i)); prev = flatten_index; @@ -355,13 +314,10 @@ fn conv< let pads = match auto_pad { AUTO_PAD::NOTSET => { pads }, AUTO_PAD::SAME_UPPER => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -371,18 +327,16 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::SAME_LOWER => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -392,18 +346,16 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads }, AUTO_PAD::VALID => { - let mut head = ArrayTrait::new(); - let mut tail = ArrayTrait::new(); + let mut head: Array = array![]; + let mut tail: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { let d = *(*X).shape.at(i); let target_size = (d + *strides.at(i) - 1) / *strides.at(i); let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; @@ -413,6 +365,7 @@ fn conv< tail.append(pad_tail); i += 1; }; + head.append_span(tail.span()); let pads = head.span(); pads @@ -444,26 +397,19 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; + j += 1; }; + i += 1; }; }, @@ -471,27 +417,15 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice((*W).data, nw * sC * kh + c * kh, kh); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -510,11 +444,13 @@ fn conv< } else { dot(img, w) }; + let hr = if hr < 0 { *res_strides.at(1) - hr.into() } else { hr.into() }; + res .set( n * *res_strides.at(0) + nw * *res_strides.at(1) + hr, @@ -522,23 +458,26 @@ fn conv< + s ); } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -577,26 +516,14 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM 
{ - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { let mut l = 0; - loop { - if l == w_out { - break; - } + while l != w_out { res .set( i * *res_strides.at(0) @@ -607,10 +534,13 @@ fn conv< ); l += 1; }; + k += 1; }; + j += 1; }; + i += 1; }; }, @@ -618,29 +548,17 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw) + c * (kh * kw), kh * kw ); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -648,22 +566,16 @@ fn conv< let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; - loop { - if jo >= ew.into() { - break; - } + while jo < ew.into() { let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); let iw1 = I32Number::max(0, j + ow).into(); let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let mut ihi = ih1; - loop { - if ihi == ih2 { - break; - } + while ihi != ih2 { img .append_span( SpanTrait::slice( @@ -677,6 +589,7 @@ fn conv< ); ihi += 1; }; + let img = img.span(); let s = if w.len() != img.len() { @@ -688,18 +601,16 @@ fn conv< let jw2 = I32Number::min(sW.into() - (j + ow), kw.into()) .into(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let mut jhj = jh1; - loop { - if jhj == jh2 { - break; - } + while jhj != jh2 { w_ .append_span( SpanTrait::slice(w, jhj * kw + jw1, jw2 - jw1) ); jhj += 1; }; + let w_ = w_.span(); assert(w_.len() == img.len(), 'unexpected w and img len'); @@ -740,24 +651,26 @@ fn conv< jo += stw.into(); }; } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -806,31 +719,16 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == h_out { - break; - } + while k != h_out { let mut l = 0; - loop { - if l == w_out { - break; - } + while l != w_out { let mut m = 0; - loop { - if m == z_out { - break; - } + while m != z_out { res .set( i * *res_strides.at(0) @@ -842,12 +740,16 @@ fn conv< ); m += 1; }; + l += 1; }; + k += 1; }; + j += 1; }; + i += 1; }; }, @@ -855,29 +757,17 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * (sC * kh * kw * kz) + c * (kh * kw * kz), kh * kw * kz ); let mut io = bh; - loop { - if io >= eh.into() { - break; - } + while io < eh.into() { let hr = (io - bh) / sth.into(); if hr < h_out.into() { let i = io + (kh % 2).into(); @@ -885,10 +775,7 @@ fn conv< let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); let mut jo = bw; - loop { - if jo >= ew.into() { - break; - } + while jo < ew.into() { 
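                        // map the strided, padded input offset `jo` back to an output column index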
let wr = (jo - bw) / stw.into(); if wr < w_out.into() { let j = jo + (kw % 2).into(); @@ -896,10 +783,7 @@ fn conv< let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); let mut zo = bz; - loop { - if zo >= ez.into() { - break; - } + while zo < ez.into() { let zr = (zo - bz) / stz.into(); if zr < z_out.into() { let z = zo + (kz % 2).into(); @@ -907,17 +791,11 @@ fn conv< let iz2 = I32Number::min(z + oz + kz.into(), sW.into()) .into(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let mut ihi = ih1; - loop { - if ihi == ih2 { - break; - } + while ihi != ih2 { let mut iwi = iw1; - loop { - if iwi == iw2 { - break; - } + while iwi != iw2 { img .append_span( SpanTrait::slice( @@ -932,8 +810,10 @@ fn conv< ); iwi += 1; }; + ihi += 1; }; + let img = img.span(); let s = if w.len() != img.len() { @@ -955,17 +835,11 @@ fn conv< ) .into(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let mut jhj = jh1; - loop { - if jhj == jh2 { - break; - } + while jhj != jh2 { let mut jwj = jw1; - loop { - if jwj == jw2 { - break; - } + while jwj != jw2 { w_ .append_span( SpanTrait::slice( @@ -976,8 +850,10 @@ fn conv< ); jwj += 1; }; + jhj += 1; }; + let w_ = w_.span(); assert( @@ -1025,6 +901,7 @@ fn conv< + s ); } + zo += stz.into(); }; } @@ -1032,24 +909,26 @@ fn conv< jo += stw.into(); }; } + io += sth.into(); }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; + return TensorTrait::new(res_shape, res_data.span()); } @@ -1063,18 +942,15 @@ fn conv< let w_stride = stride((*W).shape); let x_stride = stride((*X).shape); - let mut shape_out = ArrayTrait::new(); - let mut o_index = ArrayTrait::::new(); - let mut b_index = ArrayTrait::::new(); - let mut e_index = ArrayTrait::new(); + let mut shape_out: Array = array![]; + let mut o_index: Array = array![]; + let mut b_index: Array = array![]; + let mut e_index: Array = array![]; - let mut range_len = ArrayTrait::new(); + let mut range_len: Array = array![]; let mut i = 0; - loop { - if i == nd { - break; - } + while i != nd { shape_out .append( ((*(*X).shape.at(2 + i) - *kernel_shape.at(i) + *pads.at(i) + *pads.at(i + nd)) @@ -1109,26 +985,19 @@ fn conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == sN { - break; - } + while i != sN { let mut j = 0; - loop { - if j == sM { - break; - } + while j != sM { let b_j = *B.at(j); let mut k = 0; - loop { - if k == *res_strides.at(1) { - break; - } + while k != *res_strides.at(1) { res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); k += 1; }; + j += 1; }; + i += 1; }; }, @@ -1136,37 +1005,22 @@ fn conv< } let mut n = 0; - loop { - if n == sN { - break; - } + while n != sN { let mut nw = 0; - loop { - if nw == sM { - break; - } + while nw != sM { let mut c = 0; - loop { - if c == sC { - break; - } + while c != sC { let w = SpanTrait::slice( (*W).data, nw * *w_stride.at(0) + c * *w_stride.at(1), *w_stride.at(1) ); let mut i = 0; - loop { - if i == *range_len.at(0) * *range_stride.at(0) { - break; - } - let mut io_index = ArrayTrait::::new(); - let mut r_index = ArrayTrait::::new(); + while i != *range_len.at(0) * *range_stride.at(0) { + let mut io_index: Array = array![]; + let mut r_index: Array = array![]; let mut flatten_index = i; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { let (n_index, rem) = DivRem::div_rem( 
flatten_index, (*range_stride.at(nx)).try_into().unwrap() ); @@ -1179,16 +1033,13 @@ fn conv< }; if r_index_check(r_index.span(), shape_out) { - let mut indices = ArrayTrait::::new(); - let mut i1_index = ArrayTrait::new(); - let mut i2_index = ArrayTrait::new(); - let mut idiff_index = ArrayTrait::new(); + let mut indices: Array = array![]; + let mut i1_index: Array = array![]; + let mut i2_index: Array = array![]; + let mut idiff_index: Array = array![]; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { indices.append(*io_index.at(nx) + (*kernel_shape.at(nx) % 2).into()); i1_index .append( @@ -1210,8 +1061,9 @@ fn conv< } nx += 1; }; + let i1_index = i1_index.span(); - let mut img = ArrayTrait::new(); + let mut img: Array = array![]; let img = if nx == 1 { let img = SpanTrait::slice( @@ -1224,18 +1076,12 @@ fn conv< let i_stride = stride(idiff_index.span()); let mut ii = 0; - loop { - if ii == *i_stride.at(0) * *idiff_index.at(0) { - break; - } + while ii != *i_stride.at(0) * *idiff_index.at(0) { let mut flatten_index = ii; let mut start = n * *x_stride.at(0) + c * *x_stride.at(1); let mut nx = 0; - loop { - if nx == nd - 1 { - break; - } + while nx != nd - 1 { let (ii_index, rem) = DivRem::div_rem( flatten_index, (*i_stride.at(nx)).try_into().unwrap() ); @@ -1244,6 +1090,7 @@ fn conv< start += (*i1_index.at(nx) + ii_index) * *x_stride.at(2 + nx); nx += 1; }; + img .append_span( SpanTrait::slice( @@ -1254,19 +1101,17 @@ fn conv< ); ii += 1; }; + img.span() }; let s = if w.len() != img.len() { - let mut j1_index = ArrayTrait::new(); - let mut j2_index = ArrayTrait::new(); - let mut jdiff_index = ArrayTrait::new(); + let mut j1_index: Array = array![]; + let mut j2_index: Array = array![]; + let mut jdiff_index: Array = array![]; let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { j1_index .append( I32Number::max(0, -*indices.at(nx) - *o_index.at(nx)).into() @@ -1286,9 +1131,10 @@ fn conv< } nx += 1; }; + let j1_index = j1_index.span(); - let mut w_ = ArrayTrait::new(); + let mut w_: Array = array![]; let w_ = if nx == 1 { let w_ = SpanTrait::slice( @@ -1301,18 +1147,12 @@ fn conv< let j_stride = stride(jdiff_index.span()); let mut jj = 0; - loop { - if jj == *j_stride.at(0) * *jdiff_index.at(0) { - break; - } + while jj != *j_stride.at(0) * *jdiff_index.at(0) { let mut flatten_index = jj; let mut start = 0; let mut nx = 0; - loop { - if nx == nd - 1 { - break; - } + while nx != nd - 1 { let (jj_index, rem) = DivRem::div_rem( flatten_index, (*j_stride.at(nx)).try_into().unwrap() ); @@ -1331,8 +1171,10 @@ fn conv< ); jj += 1; }; + w_.span() }; + dot(img, w_) } else { dot(img, w) @@ -1341,37 +1183,35 @@ fn conv< let mut res_index = n * *res_strides.at(0) + nw * *res_strides.at(1); let mut nx = 0; - loop { - if nx == nd { - break; - } + while nx != nd { res_index += (*r_index.at(nx)).into() * *res_strides.at(2 + nx); nx += 1; }; res.set(res_index, res.at(res_index) + s); }; + i += 1 }; + c += 1; }; + nw += 1; }; + n += 1; }; - let mut res_data = ArrayTrait::new(); + let mut res_data: Array = array![]; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; - return TensorTrait::new(res_shape, res_data.span()); -} + TensorTrait::new(res_shape, res_data.span()) +} fn r_index_check(r_index: Span, shape_out: Span) -> bool { let mut i = 0; @@ -1384,25 +1224,22 @@ fn r_index_check(r_index: Span, shape_out: Span) -> bool { } i += 1; }; - return flag; -} + flag +} fn prod, +Copy, 
+NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; -} + prod +} fn min(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); @@ -1410,33 +1247,24 @@ fn min(mut a: Span) -> usize { let mut min = *a.at(0); loop { match a.pop_front() { - Option::Some(v) => { - if *v < min { - min = *v; - }; - }, - Option::None => { - break min; - } + Option::Some(v) => { if *v < min { + min = *v; + }; }, + Option::None => { break min; } }; } } - fn max(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut max = *a.at(0); loop { match a.pop_front() { - Option::Some(v) => { - if *v > max { - max = *v; - }; - }, - Option::None => { - break max; - } + Option::Some(v) => { if *v > max { + max = *v; + }; }, + Option::None => { break max; } }; } } @@ -1444,16 +1272,14 @@ fn max(mut a: Span) -> usize { fn arange(start: usize, end: usize, step: usize) -> Span { assert((end - start) % step == 0, 'incompatible step value'); - let mut arr = ArrayTrait::new(); + let mut arr: Array = array![]; let mut i = start; - loop { - if i >= end { - break; - } + while i < end { arr.append(i); i += step; }; - return arr.span(); + + arr.span() } @@ -1469,24 +1295,18 @@ fn cartesian(mut arrays: Span>,) -> Span> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays: Array = array![]; + while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); - let mut output_arrays = ArrayTrait::>::new(); + let mut output_arrays = array![]; let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -1494,74 +1314,58 @@ fn cartesian(mut arrays: Span>,) -> Span> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; let mut ret = ArrayTrait::new(); - loop { - if i == n { - break; - } + while i != n { let mut j = 0; - let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + let mut x: Array = array![]; + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x.span()); i += 1; }; - return ret.span(); + ret.span() } fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out: Array = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; @@ -1575,13 +1379,10 @@ fn dot< ) -> T { let mut i = 0; let mut sum = NumberTrait::zero(); - loop { - if i == a.len() { - break; - } + while i != a.len() { sum = sum + *a.at(i) * *b.at(i); i += 1; }; - return sum; + sum } 
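// Illustrative sketch (an assumed example, not a hunk of this patch) of the
// refactor applied throughout these files: a counter-based `loop`/`break` block
// becomes a `while` loop, with the increment kept inside the body so the loop
// terminates:
//
//     let mut i = 0;
//     let mut total = 0_u32;
//     while i != span.len() {
//         total += *span.at(i);
//         i += 1;
//     };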
diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index bd324e0d6..f8f810558 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -1,7 +1,7 @@ use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; #[derive(Copy, Drop)] enum AUTO_PAD { @@ -33,61 +33,53 @@ fn conv_transpose< let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { - let mut dilations = ArrayTrait::new(); + let mut dilations: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { dilations.append(1); i += 1; }; + dilations.span() }, }; let kernel_shape = match kernel_shape { Option::Some(kernel_shape) => kernel_shape, Option::None => { - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + kernel_shape.span() }, }; let output_padding = match output_padding { Option::Some(output_padding) => output_padding, Option::None => { - let mut output_padding = ArrayTrait::new(); + let mut output_padding: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { output_padding.append(0); output_padding.append(0); i += 1; }; + output_padding.span() }, }; let strides = match strides { Option::Some(strides) => strides, Option::None => { - let mut strides = ArrayTrait::new(); + let mut strides: Array = array![]; let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { strides.append(1); i += 1; }; + strides.span() }, }; @@ -98,12 +90,9 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -113,23 +102,23 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; + (pads, n_dims, output_shape) }, Option::None => { let (pads, n_dims, output_shape) = match auto_pad { AUTO_PAD::NOTSET => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == strides.len() * 2 { - break; - } + while i != strides.len() * 2 { pads.append(0); i += 1; }; + let pads = pads.span(); let n_dims = (*X).shape.len() - 2; @@ -137,13 +126,9 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } - + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -153,6 +138,7 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; @@ -163,25 +149,20 @@ fn conv_transpose< let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == 
strides.len() { - break; - } + while i != strides.len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; + output_shape.span() }, }; - let mut total_padding = ArrayTrait::new(); + let mut total_padding: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -191,51 +172,43 @@ fn conv_transpose< ); i += 1; }; + let total_padding = total_padding.span(); - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; + let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); i += 1; }; + (pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::SAME_LOWER => { let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == strides.len() { - break; - } + while i != strides.len() { output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); i += 1; }; + output_shape.span() }, }; - let mut total_padding = ArrayTrait::new(); + let mut total_padding: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { total_padding .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -245,50 +218,42 @@ fn conv_transpose< ); i += 1; }; + let total_padding = total_padding.span(); - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); i += 1; }; + let mut i = 0; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { pads.append(*total_padding.at(i) / 2); i += 1; }; + (pads.span(), pads.len() / 2, output_shape) }, AUTO_PAD::VALID => { - let mut pads = ArrayTrait::new(); + let mut pads: Array = array![]; let mut i = 0; - loop { - if i == strides.len() * 2 { - break; - } + while i != strides.len() * 2 { pads.append(0); i += 1; }; + let pads = pads.span(); let n_dims = (*X).shape.len() - 2; let output_shape = match output_shape { Option::Some(output_shape) => output_shape, Option::None => { - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { output_shape .append( (*(*X).shape.at(i + 2) - 1) * *strides.at(i) @@ -298,12 +263,15 @@ fn conv_transpose< ); i += 1; }; + output_shape.span() }, }; + (pads, n_dims, output_shape) }, }; + (pads, n_dims, output_shape) }, }; @@ -312,15 +280,13 @@ fn conv_transpose< Option::None => { 1 }, }; - let mut kernel_shape = ArrayTrait::new(); + let mut kernel_shape: Array = array![]; let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; + let kernel_shape = kernel_shape.span(); let kernel_size = prod(kernel_shape, 0); @@ -332,14 +298,11 @@ fn conv_transpose< let n = prod((*X).shape, 2); let k = C / group; - let mut final = ArrayTrait::new(); + let mut final: Array = array![]; if group == 1 { let mut image_id = 0; - loop { - if image_id == 
*(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let w_t = TensorTrait::new(array![k, m].span(), (*W).data) .transpose(array![1, 0].span()); @@ -349,10 +312,7 @@ fn conv_transpose< let gemmc = gemm .reshape(array![num_output_channels, m / num_output_channels, n].span()); let mut c = 0; - loop { - if c == num_output_channels { - break; - } + while c != num_output_channels { let gemmc_c = TensorTrait::new( array![m / num_output_channels, n].span(), SpanTrait::slice( @@ -367,103 +327,78 @@ fn conv_transpose< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res.set(i, res.at(i) + *(*B).data.at(c)); i += 1; }; }, Option::None => {}, } + c += 1; let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { final.append(res.at(i)); i += 1; }; }; + image_id += 1; }; } else { - let mut output_array = ArrayTrait::new(); + let mut output_array: Array> = array![]; let mut i = 0; let mut output_size = 1; - loop { - if i == output_shape.len() { - break; - } + while i != output_shape.len() { output_size *= *output_shape.at(i); i += 1; }; // Computation of conv transposition per group let mut group_id = 0; - loop { - if group_id == group { - break; - } - let mut group_X = ArrayTrait::new(); - let mut group_W = ArrayTrait::new(); + while group_id != group { + let mut group_X: Array = array![]; + let mut group_W: Array = array![]; let mut image_id = 0; - loop { - if image_id == *(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let start = image_id * n * C + (group_id * C / group) * n; let end = image_id * n * C + ((group_id + 1) * C / group) * n; let mut i = start; - loop { - if i == end { - break; - } + while i != end { group_X.append(*(*X).data.at(i)); - i += 1; }; + image_id += 1; }; let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; - loop { - if i == end { - break; - } + while i != end { group_W.append(*(*W).data.at(i)); i += 1; }; - let mut shape_X = ArrayTrait::new(); + let mut shape_X: Array = array![]; shape_X.append(*(*X).shape.at(0)); shape_X.append(C / group); let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { shape_X.append(*(*X).shape.at(i)); i += 1; }; - let mut shape_W = ArrayTrait::new(); + let mut shape_W: Array = array![]; shape_W.append(C / group); let mut i = 1; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { shape_W.append(*(*W).shape.at(i)); i += 1; }; @@ -492,47 +427,39 @@ fn conv_transpose< group_id += 1; }; + let output_array = output_array.span(); // Sorting result per item of the batch // output size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; - loop { - if image_id == *(*X).shape.at(0) { - break; - } + while image_id != *(*X).shape.at(0) { let mut group_id = 0; - loop { - if group_id == group { - break; - } + while group_id != group { let group_output = *output_array.at(group_id); let mut i = image_id * output_size * (num_output_channels / group); - loop { - if i == (image_id + 1) * output_size * (num_output_channels / group) { - break; - } + while i != (image_id + 1) * output_size * (num_output_channels / group) { final.append(*group_output.at(i)); i += 1; }; + group_id += 1; }; + image_id += 1; }; } + let mut shape = array![*(*X).shape.at(0), num_output_channels]; let mut i = 0; - loop { - if i == output_shape.len() { - 
break; - } + while i != output_shape.len() { shape.append(*output_shape.at(i)); i += 1; }; - return TensorTrait::new(shape.span(), final.span()); + TensorTrait::new(shape.span(), final.span()) } fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { @@ -558,12 +485,9 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let mut dim_col = ArrayTrait::new(); + let mut dim_col: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { dim_col .append( (*image_shape.at(i) @@ -575,6 +499,7 @@ fn col2im_naive_implementation< i += 1; }; + let dim_col = dim_col.span(); let stride_img = stride(image_shape); @@ -585,24 +510,15 @@ fn col2im_naive_implementation< let kernel_size = prod(kernel_shape, 0); let col_size = prod(dim_col, 0); let mut c_col = 0; - loop { - if c_col == kernel_size { - break; - } + while c_col != kernel_size { let offset = get_indices(c_col, kernel_shape).span(); let mut col = 0; - loop { - if col == col_size { - break; - } + while col != col_size { let ind_col = get_indices(col, dim_col).span(); - let mut ind_im = ArrayTrait::new(); + let mut ind_im: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads .at(i) { let neg_index = *pads.at(i) @@ -619,25 +535,26 @@ fn col2im_naive_implementation< i += 1; }; + let ind_im = ind_im.span(); if !is_out(ind_im, image_shape) { let mut index = 0; let mut i = 0; - loop { - if i == image_shape.len() { - break; - } + while i != image_shape.len() { index += *stride_img.at(i) * *ind_im.at(i); i += 1; }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); } + col += 1; }; + c_col += 1; }; - return data_im; + data_im } fn col2im_shape_check, +Copy, +Drop,>( @@ -656,13 +573,10 @@ fn col2im_shape_check, +Copy, +Drop,>( let input_length = *(*X).shape.at(1); let n_dims = output_shape.len(); - let mut n_blocks = ArrayTrait::new(); + let mut n_blocks: Array = array![]; let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { n_blocks .append( (*output_shape.at(i) @@ -683,12 +597,9 @@ fn col2im_shape_check, +Copy, +Drop,>( fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; - let mut res = ArrayTrait::new(); + let mut res: Array = array![]; let mut k = shape.len() - 1; - loop { - if k == 0 { - break; - } + while k != 0 { let m = i % *shape.at(k); res.append(m); i -= m; @@ -696,17 +607,15 @@ fn get_indices(index: usize, shape: Span,) -> Array { k -= 1; }; - let mut new_res = ArrayTrait::new(); + let mut new_res: Array = array![]; new_res.append(i); let mut i = shape.len() - 1; - loop { - if i == 0 { - break; - } + while i != 0 { new_res.append(*res.at(i - 1)); i -= 1; }; - return new_res; + + new_res } fn is_out(ind: Span, shape: Span,) -> bool { @@ -725,22 +634,20 @@ fn is_out(ind: Span, shape: Span,) -> bool { } n += 1; }; - return is_out; -} + is_out +} fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; + + prod } diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo index c9efe3f66..161ea46ad 100644 --- a/src/operators/nn/functional/depth_to_space.cairo +++ 
b/src/operators/nn/functional/depth_to_space.cairo @@ -1,15 +1,9 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::depth_to_space docstring fn depth_to_space< T, @@ -24,23 +18,26 @@ fn depth_to_space< >( tensor: Tensor, blocksize: usize, mode: felt252 ) -> Tensor { - assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); + let b = (tensor.shape).at(0); let C = (tensor.shape).at(1); let H = (tensor.shape).at(2); let W = (tensor.shape).at(3); let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; + if mode == 'DCR' { let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); - return transposed.reshape(target_shape: finalshape.span()); - } - else { + + transposed.reshape(target_shape: finalshape.span()) + } else { // assert mode == "CRD" let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); - return transposed.reshape(target_shape: finalshape.span()); + + transposed.reshape(target_shape: finalshape.span()) } } diff --git a/src/operators/nn/functional/gemm.cairo b/src/operators/nn/functional/gemm.cairo index c37bda880..ffd4ca46c 100644 --- a/src/operators/nn/functional/gemm.cairo +++ b/src/operators/nn/functional/gemm.cairo @@ -1,5 +1,4 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::SpanTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::{core::{Tensor, TensorTrait}, math::arithmetic::mul_by_scalar}; @@ -39,11 +38,11 @@ fn gemm< NumberTrait::one() }; - if transA == true { + if transA { A = A.transpose(array![1, 0].span()); } - if transB == true { + if transB { B = B.transpose(array![1, 0].span()); } @@ -57,8 +56,8 @@ fn gemm< let c = Tensor { shape: broadcast_c_shape, data: c.data }; - return mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta); + mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta) }, - Option::None => { return mul_by_scalar(@A.matmul(@B), alpha); } + Option::None => { mul_by_scalar(@A.matmul(@B), alpha) } } } diff --git a/src/operators/nn/functional/grid_sample.cairo b/src/operators/nn/functional/grid_sample.cairo index ed1cb01b6..aed560e37 100644 --- a/src/operators/nn/functional/grid_sample.cairo +++ b/src/operators/nn/functional/grid_sample.cairo @@ -1,12 +1,10 @@ -use core::option::OptionTrait; -use core::traits::TryInto; -use orion::numbers::NumberTrait; -use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; -use orion::operators::vec::{NullableVec, NullableVecImpl}; -use orion::operators::tensor::core::{stride}; use core::debug::PrintTrait; + use orion::numbers::FP16x16; -use orion::operators::tensor::{FP16x16Tensor}; +use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{stride}; +use 
orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; #[derive(Copy, Drop)] enum MODE { @@ -77,7 +75,7 @@ fn grid_sample< let border = prepare_border(X, dims, align_corner); - let mut y_dims = array![N, C]; + let mut y_dims: Array = array![N, C]; y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let y_dims = y_dims.span(); @@ -85,32 +83,23 @@ fn grid_sample< return TensorTrait::new(array![].span(), array![].span()); } - let mut Y = ArrayTrait::new(); + let mut Y: Array = array![]; let mut n = 0; - loop { - if n == N { - break; - } + while n != N { let grid_data = SpanTrait::slice((*grid).data, n * *grid_stride.at(0), *grid_stride.at(0)); let grid_data_stride = SpanTrait::slice(grid_stride, 1, grid_stride.len() - 1); let mut c = 0; - loop { - if c == C { - break; - } + while c != C { let X_data = SpanTrait::slice( (*X).data, n * *x_stride.at(0) + c * *x_stride.at(1), *x_stride.at(1) ); let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2); let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); - let mut ix = 0; - loop { - if ix == all_coords.len() { - break; - } + let mut ix = 0; + while ix != all_coords.len() { let ox = *all_coords.at(ix); let nx = get_sub(grid_data, grid_data_stride, ox); let nx = reverse(nx); @@ -122,14 +111,10 @@ fn grid_sample< MODE::CUBIC => { x }, }; - let mut new_x = ArrayTrait::new(); + let mut new_x: Array = array![]; let mut i = 0; - loop { - if i == x.len() { - break; - } + while i != x.len() { let v = *x.at(i); - let mut x_min = *border.at(i); let mut x_max = *border.at(i + num_dims); let new_v = if v < x_min || v > x_max { @@ -149,9 +134,11 @@ fn grid_sample< } else { v }; + new_x.append(new_v); i += 1; }; + let x = new_x.span(); let y = match mode { @@ -169,15 +156,18 @@ fn grid_sample< ) }, }; - Y.append(y); + Y.append(y); ix += 1; }; + c += 1; }; + n += 1; }; - return TensorTrait::new(y_dims, Y.span()); + + TensorTrait::new(y_dims, Y.span()) } fn gs_cubic_interpolation_1d_with_x< @@ -213,9 +203,9 @@ fn gs_cubic_interpolation_1d_with_x< let v_2 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); let v_3 = pixel_at_array(data, x_2.try_into().unwrap(), border, padding_mode); - let v = array![v_0, v_1, v_2, v_3].span(); + let v: Span = array![v_0, v_1, v_2, v_3].span(); - return dot(coeffs, v); + dot(coeffs, v) } fn gs_get_cubic_coeffs< @@ -245,7 +235,7 @@ fn gs_get_cubic_coeffs< let A = NumberTrait::neg(three / four); let x = NumberTrait::abs(x); - let mut coeffs = ArrayTrait::new(); + let mut coeffs: Array = array![]; coeffs.append(((A * (x + one) - five * A) * (x + one) + eigth * A) * (x + one) - four * A); coeffs.append(((A + two) * x - (A + three)) * x * x + one); @@ -255,7 +245,8 @@ fn gs_get_cubic_coeffs< ((A * ((one - x) + one) - five * A) * ((one - x) + one) + eigth * A) * ((one - x) + one) - four * A ); - return coeffs.span(); + + coeffs.span() } fn gs_cubic_interpolation_nd_with_x< @@ -294,13 +285,10 @@ fn gs_cubic_interpolation_nd_with_x< return a; } - let mut res1d = ArrayTrait::new(); + let mut res1d: Array = array![]; let mut i = 0; - loop { - if i == *data_dims.at(0) { - break; - } + while i != *data_dims.at(0) { let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); @@ -316,23 +304,23 @@ fn gs_cubic_interpolation_nd_with_x< let r = gs_cubic_interpolation_nd_with_x( sub_data, data_dims_sub, 
data_stride_sub, sub_x, border.span(), padding_mode ); + res1d.append(r); i += 1; }; - return gs_cubic_interpolation_1d_with_x( + gs_cubic_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode - ); + ) } - fn gs_get_linear_coeffs, +Copy, +NumberTrait, +Sub,>( x: T ) -> Span { let x = NumberTrait::abs(x); - return array![NumberTrait::one() - x, x].span(); -} + array![NumberTrait::one() - x, x].span() +} fn gs_linear_interpolation_1d_with_x< T, @@ -362,9 +350,9 @@ fn gs_linear_interpolation_1d_with_x< let v_0 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); let v_1 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); - let v = array![v_0, v_1].span(); + let v: Span = array![v_0, v_1].span(); - return dot(coeffs, v); + dot(coeffs, v) } fn dot, +Copy, +NumberTrait, +Add, +TensorTrait, +Mul,>( @@ -374,18 +362,14 @@ fn dot, +Copy, +NumberTrait, +Add, +TensorTrait = array![]; let mut i = 0; - loop { - if i == *data_dims.at(0) { - break; - } + while i != *data_dims.at(0) { let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); let sub_x = SpanTrait::slice(x, 1, x.len() - 1); @@ -443,16 +424,16 @@ fn gs_linear_interpolation_nd_with_x< let r = gs_linear_interpolation_nd_with_x( sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode ); + res1d.append(r); i += 1; }; - return gs_linear_interpolation_1d_with_x( + gs_linear_interpolation_1d_with_x( res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode - ); + ) } - fn pixel_at_ndarray< T, MAG, @@ -525,7 +506,7 @@ fn pixel_at_ndarray< border.append_span(border1); border.append_span(border2); - return pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode); + pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode) } fn pixel_at_array< @@ -571,21 +552,18 @@ fn pixel_at_array< }, }; - return pixel; + pixel } fn zeros, +Copy, +NumberTrait>(n: usize) -> Span { - let mut zeros = ArrayTrait::new(); + let mut zeros: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { zeros.append(NumberTrait::zero()); i += 1; }; - return zeros.span(); + zeros.span() } fn rint< @@ -604,14 +582,11 @@ fn rint< data: Span ) -> Span { // round to nearest if ties rounds to the nearest even value. 
- let mut rint = ArrayTrait::new(); + let mut rint: Array = array![]; let two: T = NumberTrait::one() + NumberTrait::one(); let mut i = 0; - loop { - if i == data.len() { - break; - } + while i != data.len() { let x = *data.at(i); let mut round = NumberTrait::round(x); @@ -621,11 +596,12 @@ fn rint< round -= NumberTrait::one() } } + rint.append(round); i += 1; }; - return rint.span(); + rint.span() } fn clamp, +Copy, +NumberTrait, +PartialOrd>( @@ -634,10 +610,12 @@ fn clamp, +Copy, +NumberTrait, +PartialOrd>( if val < low { return low; } + if val > high { return high; } - return val; + + val } fn gs_reflect< @@ -686,23 +664,18 @@ fn gs_reflect< fx }; - return fx; + fx } - fn reverse, +Drop,>(data: Span) -> Span { - let mut rev = ArrayTrait::new(); + let mut rev: Array = array![]; let mut i = data.len(); - loop { - if i == 0 { - break; - } + while i != 0 { rev.append(*data.at(i - 1)); - i -= 1; }; - return rev.span(); + rev.span() } fn get_sub, +Drop,>( @@ -710,34 +683,26 @@ fn get_sub, +Drop,>( ) -> Span { let mut acc_indices = 0; let mut i = 0; - loop { - if i == index.len() { - break; - } + while i != index.len() { acc_indices += *index.at(i) * *stride_data.at(i); - i += 1; }; - return SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)); + SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)) } - fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } + while i != pA.len() { prod = prod * (*pA.at(i)); i += 1; }; - return prod; -} + prod +} fn prepare_border< T, @@ -757,14 +722,11 @@ fn prepare_border< ) -> Span { let num_dims = dims.len(); - let mut borders1 = ArrayTrait::new(); - let mut borders2 = ArrayTrait::new(); + let mut borders1: Array = array![]; + let mut borders2: Array = array![]; let mut i = 0; - loop { - if i == num_dims { - break; - } + while i != num_dims { if align_corner == 0 { borders1.append(-NumberTrait::half()); borders2 @@ -778,26 +740,26 @@ fn prepare_border< NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one() ); } + i += 1; }; + borders1.append_span(borders2.span()); - return borders1.span(); + + borders1.span() } fn arange(start: usize, end: usize, step: usize) -> Span { assert((end - start) % step == 0, 'incompatible step value'); - let mut arr = ArrayTrait::new(); + let mut arr: Array = array![]; let mut i = start; - loop { - if i >= end { - break; - } + while i != end { arr.append(i); i += step; }; - return arr.span(); -} + arr.span() +} fn gs_denormalize_coordinates< T, @@ -814,20 +776,17 @@ fn gs_denormalize_coordinates< >( n: Span, dims: Span, align_corner: usize ) -> Span { - let mut x = ArrayTrait::new(); + let mut x: Array = array![]; let mut i = 0; - loop { - if i == n.len() { - break; - } + while i != n.len() { let v = *n.at(i); let dim = *dims.at(i); x.append(gs_denormalize(v, dim, align_corner)); i += 1; }; - return x.span(); + x.span() } fn gs_denormalize< @@ -854,22 +813,19 @@ fn gs_denormalize< (n + NumberTrait::one()) / two * (length - NumberTrait::one()) }; - return x; + x } fn get_all_coords(shape: Span) -> Span> { - let mut all_indices = ArrayTrait::new(); + let mut all_indices = array![]; let mut i = 0; - loop { - if i == shape.len() { - break; - } + while i != shape.len() { all_indices.append(arange(0, *shape.at(i), 1)); i += 1; }; - return cartesian(all_indices.span()); + cartesian(all_indices.span()) } fn cartesian(mut arrays: Span>,) -> Span> { @@ -884,24 
+840,18 @@ fn cartesian(mut arrays: Span>,) -> Span> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays: Array = array![]; + while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); let mut output_arrays = ArrayTrait::>::new(); let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -909,75 +859,58 @@ fn cartesian(mut arrays: Span>,) -> Span> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; - let mut ret = ArrayTrait::new(); - loop { - if i == n { - break; - } + let mut ret = array![]; + while i != n { let mut j = 0; let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x.span()); i += 1; }; - return ret.span(); + ret.span() } - fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out: Array = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; diff --git a/src/operators/nn/functional/hard_sigmoid.cairo b/src/operators/nn/functional/hard_sigmoid.cairo index bd9714757..8a368e3be 100644 --- a/src/operators/nn/functional/hard_sigmoid.cairo +++ b/src/operators/nn/functional/hard_sigmoid.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::hard_sigmoid docstring fn hard_sigmoid< @@ -23,7 +17,7 @@ fn hard_sigmoid< >( mut x: Tensor, alpha: @T, beta: @T ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match x.data.pop_front() { @@ -36,6 +30,6 @@ fn hard_sigmoid< }; }; - return TensorTrait::new(x.shape, data_result.span()); + TensorTrait::new(x.shape, data_result.span()) } diff --git a/src/operators/nn/functional/leaky_relu.cairo b/src/operators/nn/functional/leaky_relu.cairo index d1677d48f..113383deb 100644 --- a/src/operators/nn/functional/leaky_relu.cairo +++ b/src/operators/nn/functional/leaky_relu.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::leaky_relu docstring fn 
leaky_relu< @@ -23,7 +17,7 @@ fn leaky_relu< ) -> Tensor { assert(*alpha < NumberTrait::one(), 'alpha must be less than 1'); - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -38,5 +32,5 @@ fn leaky_relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/linear.cairo b/src/operators/nn/functional/linear.cairo index fcc616f2c..c01deb87d 100644 --- a/src/operators/nn/functional/linear.cairo +++ b/src/operators/nn/functional/linear.cairo @@ -1,5 +1,3 @@ -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -20,5 +18,5 @@ fn linear< let dot = weights.matmul(@z); let sum = dot + bias; - return sum; + sum } diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index 1280e7cd3..fdf89c43d 100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -1,8 +1,6 @@ -use core::array::SpanTrait; - +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; /// Cf: NNTrait::logsoftmax docstring @@ -16,7 +14,7 @@ fn logsoftmax< let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); - return logsoftmax; + logsoftmax } /// Cf: NNTrait::logsoftmax docstring @@ -42,5 +40,6 @@ fn logsoftmaxWide< let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor.reduce_sum(axis, true); let softmax = div_downcast(@exp_tensor, @sum); + softmax.log() } diff --git a/src/operators/nn/functional/relu.cairo b/src/operators/nn/functional/relu.cairo index 7555c515d..bdd5c1fee 100644 --- a/src/operators/nn/functional/relu.cairo +++ b/src/operators/nn/functional/relu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn relu< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -32,5 +28,5 @@ fn relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/sigmoid.cairo b/src/operators/nn/functional/sigmoid.cairo index c7ed638aa..2acf5c851 100644 --- a/src/operators/nn/functional/sigmoid.cairo +++ b/src/operators/nn/functional/sigmoid.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::sigmoid docstring fn sigmoid< @@ -23,7 +17,7 @@ fn sigmoid< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -36,6 +30,6 @@ fn sigmoid< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/softmax.cairo 
b/src/operators/nn/functional/softmax.cairo index 95d0f9b9f..10602bde7 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -1,6 +1,6 @@ +use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; -use orion::numbers::fixed_point::core::FixedTrait; /// Cf: NNTrait::softmax docstring fn softmax< @@ -14,6 +14,7 @@ fn softmax< ) -> Tensor { let exp_tensor = z.exp(); let sum = exp_tensor.reduce_sum(axis, true); + exp_tensor / sum } @@ -39,6 +40,7 @@ fn softmaxWide< ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor.reduce_sum(axis, true); + div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/nn/functional/softmax_zero.cairo b/src/operators/nn/functional/softmax_zero.cairo index e90dd4784..8749caa22 100644 --- a/src/operators/nn/functional/softmax_zero.cairo +++ b/src/operators/nn/functional/softmax_zero.cairo @@ -1,14 +1,9 @@ -use core::traits::Into; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::softmax_zero docstring fn softmax_zero< T, @@ -25,6 +20,7 @@ fn softmax_zero< ) -> Tensor { let exp_tensor = exp_zero(*z); let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true); + exp_tensor / sum_no_zero } @@ -54,10 +50,10 @@ fn softmaxWide_zero< ) -> Tensor { let exp_tensor: Tensor = exp_upcast_zero(*z); let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true); + div_downcast(@exp_tensor, @sum_no_zero) } - /// Helper function that compute the exponential of a tensor except if the value of an entry is zero, the value remains zero. /// /// # Arguments @@ -76,7 +72,7 @@ fn exp_zero< >( mut z: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match z.data.pop_front() { @@ -91,7 +87,7 @@ fn exp_zero< }; }; - return TensorTrait::new(z.shape, result.span()); + TensorTrait::new(z.shape, result.span()) } /// Helper function that compute the exponential of a tensor except if the value of an entry is zero, the value remains zero. @@ -119,7 +115,7 @@ fn exp_upcast_zero< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -134,10 +130,9 @@ fn exp_upcast_zero< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } - /// Helper function that compute the reduce sum making sure no none zero value are in the output tensor. 
/// /// # Arguments @@ -158,42 +153,44 @@ fn reduce_sum_no_zero< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); + let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_sum); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); } else { assert(axis <= (*self.shape).len(), 'axis out of dimensions'); + let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); + let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); - let mut current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); + let mut current_sum = accumulate_sum::< + T + >(*self.data, *self.shape, output_indices, axis); if current_sum == NumberTrait::zero() { current_sum = NumberTrait::one(); } - output_data.append(current_sum); + output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } diff --git a/src/operators/nn/functional/softplus.cairo b/src/operators/nn/functional/softplus.cairo index 1d876c535..6292e68af 100644 --- a/src/operators/nn/functional/softplus.cairo +++ b/src/operators/nn/functional/softplus.cairo @@ -1,13 +1,7 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - -use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - /// Cf: NNTrait::softplus docstring fn softplus< T, @@ -22,7 +16,7 @@ fn softplus< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -34,5 +28,5 @@ fn softplus< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/functional/softsign.cairo b/src/operators/nn/functional/softsign.cairo index 8d20ff297..180c15f02 100644 --- a/src/operators/nn/functional/softsign.cairo +++ b/src/operators/nn/functional/softsign.cairo @@ -1,12 +1,6 @@ -use core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; /// Cf: NNTrait::softsign docstring fn softsign< @@ -22,7 +16,7 @@ fn softsign< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -34,5 +28,5 @@ fn softsign< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git 
a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo index 6b0881d8b..d8e8089cb 100644 --- a/src/operators/nn/functional/space_to_depth.cairo +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -1,15 +1,9 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - /// Cf: NNTrait::space_to_depth docstring fn space_to_depth< T, @@ -24,7 +18,8 @@ fn space_to_depth< >( tensor: Tensor, blocksize: usize ) -> Tensor { - assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); + let b = (tensor.shape).at(0); let C = (tensor.shape).at(1); let H = (tensor.shape).at(2); @@ -33,5 +28,6 @@ fn space_to_depth< let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; - return transposed.reshape(target_shape: finalshape.span()); + + transposed.reshape(target_shape: finalshape.span()) } diff --git a/src/operators/nn/functional/thresholded_relu.cairo b/src/operators/nn/functional/thresholded_relu.cairo index 36533660b..a160bdb50 100644 --- a/src/operators/nn/functional/thresholded_relu.cairo +++ b/src/operators/nn/functional/thresholded_relu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn thresholded_relu< >( mut z: Tensor, alpha: @T ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -32,5 +28,5 @@ fn thresholded_relu< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, data_result.span()) } diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 3e1ae2dff..1c018ade3 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -61,10 +59,12 @@ impl FP16x16NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -90,7 +90,7 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -101,7 +101,7 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, 
dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -129,7 +129,7 @@ impl FP16x16NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 2a12d137e..a5725eccb 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -55,10 +53,12 @@ impl FP32x32NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -84,7 +84,7 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -95,7 +95,7 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -123,7 +123,7 @@ impl FP32x32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index a56c7c1f5..01a3b30ad 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -55,10 +53,12 @@ impl FP64x64NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } - fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + fn depth_to_space( + tensor: @Tensor, blocksize: usize, mode: felt252 + ) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -84,7 +84,7 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -95,7 +95,7 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -123,7 +123,7 @@ impl FP64x64NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 512c259e8..d80d2c323 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -62,7 +60,7 @@ impl FP8x23NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { 
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -88,7 +86,7 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -99,7 +97,7 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -127,7 +125,7 @@ impl FP8x23NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 67883ea2a..29a94d288 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl I32NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl I32NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl I32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl I32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index bd333834c..e22de6b43 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl I8NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl I8NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl I8NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl I8NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 15fb25ce0..7352b7ad9 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; @@ -53,7 +51,7 @@ impl U32NN of NNTrait { fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { 
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) } - + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { functional::space_to_depth::space_to_depth(*tensor, blocksize) } @@ -79,7 +77,7 @@ impl U32NN of NNTrait { ) -> Tensor { panic(array!['not supported!']) } - + fn col2im( data: @Tensor, image_shape: Span, @@ -90,7 +88,7 @@ impl U32NN of NNTrait { ) -> Tensor { functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) } - + fn conv_transpose( X: @Tensor, W: @Tensor, @@ -118,7 +116,7 @@ impl U32NN of NNTrait { strides ) } - + fn conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/sequence/functional/concat_from_sequence.cairo b/src/operators/sequence/functional/concat_from_sequence.cairo index 336bb0553..6b15c2719 100644 --- a/src/operators/sequence/functional/concat_from_sequence.cairo +++ b/src/operators/sequence/functional/concat_from_sequence.cairo @@ -1,15 +1,8 @@ -use core::clone::Clone; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; -use core::debug::PrintTrait; -use core::traits::Into; - use orion::operators::tensor::helpers::replace_index; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::math::concat::concat; use orion::numbers::{NumberTrait, I32IntoU32}; - fn concat_from_sequence< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -33,7 +26,6 @@ fn concat_from_sequence< } } - fn concat_without_new_axis< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -44,18 +36,17 @@ fn concat_without_new_axis< /// assert in range [-r, r - 1] assert( - (axis_is_negative == false && axis_value <= r - 1) - || (axis_is_negative == true && axis_value <= r), + (!axis_is_negative && axis_value <= r - 1) || (axis_is_negative && axis_value <= r), 'Out of bounds for dimension' ); - if axis_is_negative == true { + if axis_is_negative { axis_value = r - axis_value } + concat(sequence.span(), axis_value) } - fn concat_with_new_axis< T, impl TTensorTrait: TensorTrait, impl TCopy: Copy, impl TDrop: Drop, >( @@ -66,20 +57,20 @@ fn concat_with_new_axis< /// assert in range [-r - 1, r] assert( - (axis_is_negative == false && axis_value <= r) - || (axis_is_negative == true && axis_value <= r + 1), + (!axis_is_negative && axis_value <= r) || (axis_is_negative && axis_value <= r + 1), 'Out of bounds for dimension' ); - if axis_is_negative == true { + if axis_is_negative { if axis_value > r { axis_value = 0 } else { axis_value = r - axis_value } } + let mut input_sequence_copy = sequence; - let mut reshaped_sequence = ArrayTrait::>::new(); + let mut reshaped_sequence: Array> = array![]; loop { match input_sequence_copy.pop_front() { Option::Some(input_sequence_value) => { @@ -89,6 +80,7 @@ fn concat_with_new_axis< Option::None => { break; } }; }; + concat(reshaped_sequence.span(), axis_value) } @@ -99,7 +91,7 @@ fn add_new_dimension< mut tensor: Tensor, axis: usize ) -> Tensor { let mut tensor_shape = tensor.shape; - let mut new_tensor_shape = ArrayTrait::::new(); + let mut new_tensor_shape: Array = array![]; let mut tensor_shape_counter: usize = 0; loop { match tensor_shape.pop_front() { @@ -113,8 +105,10 @@ fn add_new_dimension< Option::None => { break; } }; }; + if axis >= tensor.shape.len() { new_tensor_shape.append(1); } + TensorTrait::::new(new_tensor_shape.span(), tensor.data) } diff --git a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 4a4aa9203..3ca1d567a 
100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; @@ -17,15 +14,16 @@ fn sequence_at, impl TCopy: Copy, impl TDrop: let position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= sequence.len() - 1) - || (is_negative == true && position_value <= sequence.len()), + (!is_negative && position_value <= sequence.len() - 1) + || (is_negative && position_value <= sequence.len()), 'Position out of bounds' ); - if is_negative == false { - return *sequence.at(position_value); + if !is_negative { + *sequence.at(position_value) } else { let normalized_position_value = sequence.len() - position_value; - return *sequence.at(normalized_position_value); + + *sequence.at(normalized_position_value) } } diff --git a/src/operators/sequence/functional/sequence_construct.cairo b/src/operators/sequence/functional/sequence_construct.cairo index 18902b078..d86ccfdb6 100644 --- a/src/operators/sequence/functional/sequence_construct.cairo +++ b/src/operators/sequence/functional/sequence_construct.cairo @@ -1,11 +1,8 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_construct docstring fn sequence_construct>(tensors: Array>) -> Array> { assert(tensors.len() >= 1, 'Input tensors must be >= 1'); - return tensors; + tensors } diff --git a/src/operators/sequence/functional/sequence_empty.cairo b/src/operators/sequence/functional/sequence_empty.cairo index 93e2989cc..ee74df0a5 100644 --- a/src/operators/sequence/functional/sequence_empty.cairo +++ b/src/operators/sequence/functional/sequence_empty.cairo @@ -1,16 +1,13 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_empty docstring fn sequence_empty, impl TDrop: Drop>() -> Array> { - let mut sequence = ArrayTrait::new(); + let mut sequence = array![]; - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(0); - let mut data = ArrayTrait::new(); + let mut data: Array = array![]; let tensor = TensorTrait::new(shape.span(), data.span()); sequence.append(tensor); diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index 3c6a6d57a..7c274e700 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; use orion::numbers::{NumberTrait, I32IntoU32}; @@ -12,9 +9,10 @@ fn sequence_erase, impl TCopy: Copy, impl TDr let position: Tensor = match position { Option::Some(p) => p, Option::None => { - let mut shape = ArrayTrait::::new(); - let mut data = ArrayTrait::::new(); + let mut shape: Array = array![]; + let mut data: Array = array![]; data.append(-1_i32); + TensorTrait::::new(shape.span(), data.span()) } }; @@ -26,17 +24,17 @@ fn sequence_erase, impl TCopy: Copy, impl TDr let mut position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= sequence.len() - 1) - || (is_negative == true && 
position_value <= sequence.len()), + (!is_negative && position_value <= sequence.len() - 1) + || (is_negative && position_value <= sequence.len()), 'Position out of bounds' ); - if is_negative == true { + if is_negative { position_value = sequence.len() - position_value; } let mut input_sequence_copy = sequence; - let mut output_sequence = ArrayTrait::>::new(); + let mut output_sequence: Array> = array![]; let mut tensor_counter: usize = 0; loop { match input_sequence_copy.pop_front() { @@ -45,14 +43,14 @@ fn sequence_erase, impl TCopy: Copy, impl TDr tensor_counter += 1; continue; } - output_sequence.append(input_sequence_value); + output_sequence.append(input_sequence_value); tensor_counter += 1; }, Option::None => { break; } }; }; - return output_sequence; + output_sequence } diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 83b333387..df19120b9 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -1,6 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; use orion::numbers::{NumberTrait, I32IntoU32}; @@ -12,9 +9,10 @@ fn sequence_insert, impl TCopy: Copy, impl TD let position: Tensor = match position { Option::Some(p) => p, Option::None => { - let mut shape = ArrayTrait::::new(); - let mut data = ArrayTrait::::new(); + let mut shape: Array = array![]; + let mut data: Array = array![]; data.append(-1_i32); + TensorTrait::::new(shape.span(), data.span()) }, }; @@ -26,16 +24,16 @@ fn sequence_insert, impl TCopy: Copy, impl TD let mut position_value: u32 = position_value_i32.into(); assert( - (is_negative == false && position_value <= self.len() - 1) - || (is_negative == true && position_value <= self.len()), + (!is_negative && position_value <= self.len() - 1) + || (is_negative && position_value <= self.len()), 'Position out of bounds' ); - if is_negative == true { + if is_negative { position_value = self.len() - position_value; } - let mut new_sequence = ArrayTrait::>::new(); + let mut new_sequence: Array> = array![]; let mut inserted = false; let mut self_copy = self; loop { @@ -46,7 +44,7 @@ fn sequence_insert, impl TCopy: Copy, impl TD inserted = true; } new_sequence.append(t); - if inserted == false { + if !inserted { position_value -= 1; } }, @@ -54,5 +52,5 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; }; - return new_sequence; + new_sequence } diff --git a/src/operators/sequence/functional/sequence_length.cairo b/src/operators/sequence/functional/sequence_length.cairo index 84f91e48f..0409c5809 100644 --- a/src/operators/sequence/functional/sequence_length.cairo +++ b/src/operators/sequence/functional/sequence_length.cairo @@ -1,12 +1,9 @@ -use core::array::{ArrayTrait, SpanTrait}; - use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: SequenceTrait::sequence_length docstring fn sequence_length>(self: Array>) -> Tensor { - let mut shape = ArrayTrait::::new(); - let mut result = ArrayTrait::new(); + let mut shape: Array = array![]; + let mut result: Array = array![]; result.append(self.len()); Tensor:: { shape: shape.span(), data: result.span(), } diff --git a/src/operators/sequence/implementations/sequence_bool.cairo b/src/operators/sequence/implementations/sequence_bool.cairo index 7c1402db1..d9f0de151 100644 --- a/src/operators/sequence/implementations/sequence_bool.cairo 
+++ b/src/operators/sequence/implementations/sequence_bool.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; diff --git a/src/operators/sequence/implementations/sequence_fp16x16.cairo b/src/operators/sequence/implementations/sequence_fp16x16.cairo index d03967b32..bcc6793b1 100644 --- a/src/operators/sequence/implementations/sequence_fp16x16.cairo +++ b/src/operators/sequence/implementations/sequence_fp16x16.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP16x16Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp16x16wide.cairo b/src/operators/sequence/implementations/sequence_fp16x16wide.cairo index bfaa11f37..bc133efc8 100644 --- a/src/operators/sequence/implementations/sequence_fp16x16wide.cairo +++ b/src/operators/sequence/implementations/sequence_fp16x16wide.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W; use orion::operators::tensor::implementations::tensor_fp16x16wide::FP16x16WTensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP16x16WSequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp32x32.cairo b/src/operators/sequence/implementations/sequence_fp32x32.cairo index 2025d6161..2a9af7255 100644 --- a/src/operators/sequence/implementations/sequence_fp32x32.cairo +++ b/src/operators/sequence/implementations/sequence_fp32x32.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::FP32x32; use orion::operators::tensor::implementations::tensor_fp32x32::FP32x32Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP32x32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp64x64.cairo b/src/operators/sequence/implementations/sequence_fp64x64.cairo index a1c0d3f89..004450f21 100644 --- a/src/operators/sequence/implementations/sequence_fp64x64.cairo +++ b/src/operators/sequence/implementations/sequence_fp64x64.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::FP64x64; use 
orion::operators::tensor::implementations::tensor_fp64x64::FP64x64Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP64x64Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp8x23.cairo b/src/operators/sequence/implementations/sequence_fp8x23.cairo index ae9bfd18d..567faccd8 100644 --- a/src/operators/sequence/implementations/sequence_fp8x23.cairo +++ b/src/operators/sequence/implementations/sequence_fp8x23.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP8x23Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo index 5dc8e246e..994298877 100644 --- a/src/operators/sequence/implementations/sequence_fp8x23wide.cairo +++ b/src/operators/sequence/implementations/sequence_fp8x23wide.cairo @@ -1,5 +1,3 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; @@ -7,7 +5,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::FP8x23W; use orion::operators::tensor::implementations::tensor_fp8x23wide::FP8x23WTensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl FP8x23WSequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_i32.cairo b/src/operators/sequence/implementations/sequence_i32.cairo index 8a267c244..f99c4d592 100644 --- a/src/operators/sequence/implementations/sequence_i32.cairo +++ b/src/operators/sequence/implementations/sequence_i32.cairo @@ -1,11 +1,8 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl I32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_i8.cairo b/src/operators/sequence/implementations/sequence_i8.cairo index 700e52867..3dc80952e 100644 --- a/src/operators/sequence/implementations/sequence_i8.cairo +++ b/src/operators/sequence/implementations/sequence_i8.cairo @@ -1,12 +1,9 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_i8::I8Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl I8Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { 
functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/sequence/implementations/sequence_u32.cairo b/src/operators/sequence/implementations/sequence_u32.cairo index 34ca8d578..be5b5deda 100644 --- a/src/operators/sequence/implementations/sequence_u32.cairo +++ b/src/operators/sequence/implementations/sequence_u32.cairo @@ -1,12 +1,9 @@ -use core::option::OptionTrait; - use orion::operators::tensor::core::Tensor; use orion::operators::sequence::core::SequenceTrait; use orion::operators::sequence::functional; use orion::operators::tensor::implementations::tensor_u32::U32Tensor; use orion::operators::tensor::implementations::tensor_i32::I32Tensor; - impl U32Sequence of SequenceTrait { fn sequence_construct(tensors: Array>) -> Array> { functional::sequence_construct::sequence_construct(tensors) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 23018044d..6fd7a7779 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1,7 +1,3 @@ -use core::array::{ArrayTrait, SpanTrait}; -use core::serde::Serde; -use core::option::OptionTrait; - use alexandria_data_structures::array_ext::{SpanTraitExt}; //::resize::{MODE, NEAREST_MODE, KEEP_ASPECT_RATIO_POLICY, TRANSFORMATION_MODE}; @@ -5224,7 +5220,10 @@ trait TensorTrait { /// ``` /// fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor; /// # tensor.scatter_nd /// @@ -5304,10 +5303,7 @@ trait TensorTrait { /// ``` /// fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor; /// # tensor.dynamic_quantize_linear /// @@ -5364,9 +5360,7 @@ trait TensorTrait { /// >>> ([133, 233, 236, 255, -18, -0], [0.02745], [128] /// ``` /// - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor, Tensor, Tensor); + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor, Tensor, Tensor); /// # tensor.optional /// /// ```rust @@ -5664,7 +5658,9 @@ trait TensorTrait { /// >>> [[[[7299130, 4884492]], [[2339070, 1559536]], [[3448557, 984617]], [[5745934, 3670947]], [[4665989, 3079292]], [[3375288, 948254]], [[3749966, 4911069]], [[1358829, 4368105]]]] /// ``` /// - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor; + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index caa8d3b21..9abbc9c1c 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use alexandria_data_structures::array_ext::ArrayTraitExt; use orion::utils::u32_max; @@ -27,7 +23,7 @@ fn len_from_shape(mut shape: Span) -> usize { }; }; - return result; + result } /// Verifies if the shape and the data array of a tensor are compatible. @@ -98,7 +94,7 @@ fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> }; }; - return result; + result } /// Generates the output shape after reducing a tensor along a specified axis. 
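The broadcast_index_mapping hunk above is the workhorse behind every broadcasted kernel later in this patch (the and, arithmetic and bitwise ops): it folds an index in the broadcasted output back into a flat offset in an operand, letting size-1 dimensions collapse. A minimal self-contained sketch of that fold; the function name and the naive per-dimension stride recomputation are illustrative, not the library's exact implementation:

// Illustrative sketch of broadcast index mapping, assuming `shape` and
// `indices` have the same rank. A size-1 dimension collapses because
// `index % 1 == 0`, so it contributes nothing to the flat offset.
fn broadcast_offset_sketch(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
    let mut offset: usize = 0;
    loop {
        match shape.pop_front() {
            Option::Some(dim) => {
                let index = *indices.pop_front().unwrap();
                // stride of this dimension = product of the remaining dims
                let mut stride: usize = 1;
                let mut rest = shape;
                loop {
                    match rest.pop_front() {
                        Option::Some(d) => { stride = stride * *d; },
                        Option::None => { break; },
                    };
                };
                offset += (index % *dim) * stride;
            },
            Option::None => { break; },
        };
    };
    offset
}

For shape [3, 1] and broadcasted indices [2, 4], the size-1 column collapses ((4 % 1) * 1 == 0) and the sketch returns 2, which is exactly the behaviour the elementwise kernels below rely on.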
@@ -117,7 +113,7 @@ fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> fn reduce_output_shape(mut input_shape: Span, axis: usize, keepdims: bool) -> Span { assert(axis < input_shape.len(), 'axis out of dimensions'); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut n: usize = 0; loop { @@ -137,7 +133,7 @@ fn reduce_output_shape(mut input_shape: Span, axis: usize, keepdims: bool }; }; - return output_shape.span(); + output_shape.span() } @@ -158,7 +154,7 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> let axes_len = axes.len(); assert(input_shape.len() == axes_len, 'input_shape/indices len unequal'); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; loop { match axes.pop_front() { @@ -167,7 +163,7 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> }; }; - return output_shape.span(); + output_shape.span() } /// Combines output indices with the current index of the specified axis. @@ -186,14 +182,10 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> fn combine_indices(mut output_indices: Span, axis_index: usize, axis: usize) -> Span { assert(axis <= output_indices.len(), 'axis value is out of range'); - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; let mut n: usize = 0; - loop { - if n > output_indices.len() { - break (); - } - + while n != output_indices.len() + 1 { if n == axis { result.append(axis_index); } else if n > axis { @@ -205,7 +197,7 @@ fn combine_indices(mut output_indices: Span, axis_index: usize, axis: usi n += 1; }; - return result.span(); + result.span() } @@ -237,7 +229,7 @@ fn find_axis(mut axes: Span, target_axis: usize) -> usize { }; }; - return axis; + axis } /// Computes the broadcasted shape of two tensors. @@ -254,7 +246,7 @@ fn find_axis(mut axes: Span, target_axis: usize) -> usize { /// * A Span of usize representing the broadcasted shape. fn broadcast_shape(mut shape1: Span, mut shape2: Span) -> Span { check_compatibility(shape1, shape2); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; loop { let mut dim1 = 1; @@ -278,7 +270,7 @@ fn broadcast_shape(mut shape1: Span, mut shape2: Span) -> Span, mut shape2: Span) -> Span` - The modified shape fn replace_index(mut shape: Span, index: usize, value: usize) -> Span { - let mut output = ArrayTrait::new(); + let mut output: Array = array![]; let mut i = 0; loop { @@ -314,7 +306,7 @@ fn replace_index(mut shape: Span, index: usize, value: usize) -> Span, index: usize, value: usize) -> Span` - A span containing the usize elements representing the axes. 
fn get_all_axes(shape: Span) -> Span { - let mut ret: Array = ArrayTrait::new(); + let mut ret: Array = array![]; let mut i: usize = 0; let stop_i = shape.len() - 1; loop { @@ -339,6 +331,7 @@ fn get_all_axes(shape: Span) -> Span { } i += 1; }; + ret.span() } @@ -353,20 +346,14 @@ fn flatten_array_of_tensors, +Drop,>( let stride_lim: usize = *new_stride.at(axis); let max_row = (*(*tensors.at(0).shape).at(0)); let mut row = 0; - loop { - if row >= max_row { - break; - } + while row != max_row { let mut tensors_span = tensors.span(); loop { let mut i = 0; match tensors_span.pop_front() { Option::Some(mut t) => { let mut data = *t.data; - loop { - if i >= stride_lim { - break; - } + while i != stride_lim { let idx = i + (row * stride_lim); flattened.append(*data.at(idx)); i += 1; @@ -375,8 +362,10 @@ fn flatten_array_of_tensors, +Drop,>( Option::None => { break; }, } }; + row += 1; }; + flattened.span() } @@ -390,27 +379,18 @@ fn as_tensors_array, +Drop, +TensorTrait,>( let mut axes: Array = array![]; let mut idx: usize = 0; - loop { - if idx >= rank { - break; - } + while idx != rank { axes.append(idx); idx += 1; }; idx = 0; let axis_len: usize = *shape.at(axis); - loop { - if idx >= axis_len { - break; - } + while idx != axis_len { let mut starts: Array = array![]; let mut ends: Array = array![]; let mut i: usize = 0; - loop { - if i >= rank { - break; - } + while i != rank { starts.append(if i == axis { idx } else { @@ -436,6 +416,7 @@ fn as_tensors_array, +Drop, +TensorTrait,>( idx += 1; }; + as_tensors } @@ -476,6 +457,7 @@ fn span_cmp, +Copy, +PartialEq, +PartialOrd>( } }; }; + ret } @@ -511,18 +493,18 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn optional_has_element, +Drop, +TensorTrait,>( x: Option> ) -> Tensor { - match x{ + match x { Option::Some => { - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(1); - let mut data = ArrayTrait::::new(); + let mut data: Array = array![]; data.append(true); TensorTrait::new(shape.span(), data.span()) }, Option::None => { - let mut shape = ArrayTrait::::new(); + let mut shape: Array = array![]; shape.append(1); - let mut data = ArrayTrait::::new(); + let mut data: Array = array![]; data.append(false); TensorTrait::new(shape.span(), data.span()) } @@ -544,12 +526,8 @@ fn optional_has_element, +Drop, +TensorTrait,>( fn optional_get_element, +Drop, +TensorTrait,>( x: Option> ) -> Tensor { - match x{ - Option::Some(ele) => { - ele - }, - Option::None => { - panic(array!['The input is an empty', 'optional-type.']) - } + match x { + Option::Some(ele) => { ele }, + Option::None => { panic(array!['The input is an empty', 'optional-type.']) } } } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index bb90c304c..75617080f 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -534,10 +534,12 @@ impl I8Tensor of TensorTrait { manipulation::split::split(self, axis, num_outputs, spl) } - fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + fn random_uniform_like( + tensor: @Tensor, high: Option, low: Option, seed: Option + ) -> Tensor { panic(array!['not supported!']) } - + fn range(start: i8, end: i8, step: i8) -> Tensor { math::range::range(start, end, step) } @@ -553,7 +555,7 @@ impl I8Tensor of TensorTrait { fn blackman_window(size: i8, periodic: Option) -> Tensor { panic(array!['not supported!']) } - + fn 
split_to_sequence( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { @@ -561,26 +563,24 @@ } fn reverse_sequence( - self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option ) -> Tensor { manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } - - fn optional(self: @Tensor) -> Option>{ + + fn optional(self: @Tensor) -> Option> { manipulation::optional::optional(self) } - - fn dynamic_quantize_linear( - self: @Tensor - ) -> (Tensor::, Tensor::, Tensor){ + + fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor::, Tensor) { panic(array!['not supported!']) } fn scatter_nd( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } @@ -684,7 +684,7 @@ impl I8TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { + while lhs.shape.len() != 0 && is_eq { is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); }; @@ -692,9 +692,9 @@ fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { return false; } - while lhs.data.len() == 0 && !is_eq { + while lhs.data.len() != 0 && is_eq { is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); }; is_eq -} \ No newline at end of file +} diff --git a/src/operators/tensor/manipulation/optional.cairo b/src/operators/tensor/manipulation/optional.cairo index e57e35e69..53d26d423 100644 --- a/src/operators/tensor/manipulation/optional.cairo +++ b/src/operators/tensor/manipulation/optional.cairo @@ -1,13 +1,7 @@ -use core::option::OptionTrait; use orion::operators::tensor::{Tensor, TensorTrait}; /// Cf: TensorTrait::optional docstring -fn optional< - T, - +Copy, - +Drop, - impl TOption: OptionTrait ->( +fn optional, +Drop, impl TOption: OptionTrait>( self: @Tensor ) -> Option> { Option::Some(*self) } diff --git a/src/operators/tensor/manipulation/reverse_sequence.cairo b/src/operators/tensor/manipulation/reverse_sequence.cairo index efec92399..8bb45fe9a 100644 --- a/src/operators/tensor/manipulation/reverse_sequence.cairo +++ b/src/operators/tensor/manipulation/reverse_sequence.cairo @@ -1,28 +1,23 @@ -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; /// Cf: TensorTrait::reverse_sequence docstring -fn reverse_sequence< - T, - impl TTensor: TensorTrait, - impl TCopy: Copy, - impl TDrop: Drop ->( - self: @Tensor, - sequence_lens: Tensor, - batch_axis: Option, +fn reverse_sequence, impl TCopy: Copy, impl TDrop: Drop>( + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, time_axis: Option -) -> Tensor{ +) -> Tensor { let shape = *self.shape; let mut data: Array = array![]; let has_batch_axis: usize = match batch_axis { - Option::Some(value) => { - assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0."); - value - }, - Option::None => 0, + Option::Some(value) => { + assert!((value == 0) || (value == 1), "batch_axis must be one of 1 or 0."); + value + }, + Option::None => 0, }; + let has_time_axis: usize = match time_axis { Option::Some(value) => { assert!((value == 0) || (value == 1), "time_axis must be one of 1 or 0."); @@ -30,8 +25,9 @@ }, Option::None => 1, }; +
assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal"); - assert!((*self.data).len() >= 2, "Tensor of rank r >= 2"); + assert((*self.shape).len() >= 2, 'Tensor of rank r >= 2'); let control: bool = if has_batch_axis == 0 && has_time_axis == 1 { true } else { @@ -41,69 +37,64 @@ let mut index: Array = reverse_index(*self.shape, sequence_lens, control); loop { match index.pop_front() { - Option::Some(ele) => { - data.append(*((*self).data).at(ele)); - }, - Option::None => { - break; - } + Option::Some(ele) => { data.append(*((*self).data).at(ele)); }, + Option::None => { break; } } }; - + TensorTrait::::new(shape, data.span()) } -fn reverse_index( - shape: Span, sequence_lens: Tensor, control: bool -) -> Array { +fn reverse_index(shape: Span, sequence_lens: Tensor, control: bool) -> Array { let x: usize = *shape.at(0); let y: usize = *shape.at(1); - let mut result = ArrayTrait::::new(); + let mut result: Array = array![]; if control { // [i, slice] - assert!(sequence_lens.data.len() <= x,"The length of sequence_lens cannot exceed batch_axis"); + assert!( sequence_lens.data.len() <= x, "The length of sequence_lens cannot exceed batch_axis" ); let mut i: usize = 0; - loop { - if i >= x { - break; - } - + while i != x { let reverse: usize = (*sequence_lens.data.at(i)); - assert!(reverse <= y && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + assert!( reverse <= y && reverse >= 1, "sequence_lens must be between 1 and the reversed axis size" ); let mut j: usize = reverse - 1; loop { - if j == 0 { result.append(i * y + j); break; } + if j == 0 { result.append(i * y + j); break; } + result.append(i * y + j); j -= 1; }; let current_index_len: usize = (i + 1) * y - 1; let mut j: usize = result.len(); - loop { - if j > current_index_len { - break; - } + while j != current_index_len + 1 { result.append(j); j += 1; }; + i += 1; }; } else { // [slice, i] - assert!(sequence_lens.data.len() <= y,"The length of sequence_lens cannot exceed time_axis"); + assert!( sequence_lens.data.len() <= y, "The length of sequence_lens cannot exceed time_axis" ); let mut tmp = ArrayTrait::::new(); let mut i: usize = 0; - loop { - if i > y - 1 { - break; - } + while i != y { let reverse: usize = *sequence_lens.data.at(i); - assert!(reverse <= x && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + assert!( reverse <= x && reverse >= 1, "sequence_lens must be between 1 and the reversed axis size" ); let mut j: usize = reverse - 1; loop { @@ -115,31 +106,26 @@ fn reverse_index( j -= 1; }; let mut j: usize = reverse; - loop { - if j > x - 1 { - break; - } + while j != x { tmp.append(j * y + i); j += 1; }; + i += 1; }; + let tmp = tmp.span(); - let mut i : usize = 0; - loop { - if i > x - 1 { - break; - } + let mut i: usize = 0; while i != x { let mut j: usize = 0; - loop { - if j > y - 1 { - break; - } + while j != y { result.append((*tmp.at(j * x + i))); j += 1; }; + i += 1; }; } + result -} \ No newline at end of file +} diff --git a/src/operators/tensor/manipulation/split.cairo b/src/operators/tensor/manipulation/split.cairo index 3919c034f..a8036f219 100644 --- a/src/operators/tensor/manipulation/split.cairo +++ b/src/operators/tensor/manipulation/split.cairo @@ -1,23 +1,16 @@ use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; /// Cf: TensorTrait::split docstring
-fn split< - T, - +Copy, - +Drop, - +TensorTrait, ->( +fn split, +Drop, +TensorTrait,>( self: @Tensor, axis: usize, num_outputs: Option, split: Option> ) -> Array> { let has_num_outputs = match num_outputs { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; let has_split = match split { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; assert(!(has_num_outputs && has_split), 'split or num_outputs not both.'); @@ -34,6 +27,7 @@ } else { splited_t = split_has_split(self, axis, split.unwrap()); } + splited_t } @@ -52,23 +46,18 @@ fn split_num_outputs, +Drop, +TensorTrait,>( if (*(*t).shape.at(axis) % num_outputs == 0) { div = *(*t).shape.at(axis) / num_outputs; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; } else { div = *(*t).shape.at(axis) / num_outputs + 1; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; + match split.pop_front() { Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); @@ -80,34 +69,29 @@ let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.len()) { - break; - } + while i != split.len() { let spl = *split.at(i); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_1 is failed'); 0 @@ -121,6 +105,7 @@ splited_t.append(sub_t); i += 1; }; + splited_t } @@ -133,34 +118,29 @@ fn split_has_split, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.data.len()) { - break; - } + while i != split.data.len() { let spl: usize = split.at(indices: array![i].span()); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_1 is failed'); 0 @@ -174,5 +154,6 @@ splited_t.append(sub_t); i += 1; }; + splited_t } diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 7ff3ff8db..46dbe1af7 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -1,44 +1,34 @@ use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; -use core::array::{ArrayTrait, SpanTrait}; 
-use core::option::OptionTrait; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; /// Cf: NNTrait::split docstring -fn split_to_sequence< - T, - +Copy, - +Drop, - +TensorTrait, ->( +fn split_to_sequence, +Drop, +TensorTrait,>( self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { let has_split = match split { - Option::Some => { true }, + Option::Some => true, Option::None => false, }; let mut has_num_outputs = false; let mut split_unwrap: Tensor = TensorTrait::new(array![1].span(), array![1].span()); - if (!has_split){ + if (!has_split) { let split_length = *(*self.shape).at(axis); let mut split_data: Array = array![]; let mut i = 0; - loop{ - if (i >= split_length) { - break; - } + while i != split_length { split_data.append(1); - i += 1; + i += 1; }; + split_unwrap = TensorTrait::new(array![split_length].span(), split_data.span()); - }else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) { + } else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) { // A scalar has_num_outputs = true; split_unwrap = split.unwrap(); - }else{ + } else { split_unwrap = split.unwrap(); } - let mut splited_t: Array> = array![]; @@ -52,28 +42,25 @@ fn split_to_sequence< splited_t = split_has_split(self, axis, split_unwrap); } - if (keepdims==0 && has_split==false) { + if (keepdims == 0 && !has_split) { let mut splited_t_temp: Array> = array![]; let mut i = 0; - loop{ - if (i >= splited_t.len()) { - break; - } + while i != splited_t.len() { let mut shape: Array = array![]; let mut j = 0; let shape_in_splited: Span = *splited_t.at(i).shape; - loop{ - if ( j >= shape_in_splited.len()) { - break; + while j != shape_in_splited.len() { + if (j != axis) { + shape.append(*shape_in_splited.at(j)) } - if (j!=axis) { - shape.append(*shape_in_splited.at(j)) - } - j += 1; + + j += 1; }; + splited_t_temp.append(splited_t[i].reshape(shape.span())); - i += 1; + i += 1; }; + return splited_t_temp; } splited_t @@ -82,12 +69,7 @@ fn split_to_sequence< /// Subfunction split for tensors (wth num_outputs). 
/// Cf: TensorTrait::split docstring -fn split_num_outputs< - T, - +Copy, - +Drop, - +TensorTrait, ->( +fn split_num_outputs, +Drop, +TensorTrait,>( t: @Tensor, mut axis: usize, num_outputs: usize ) -> Array> { let mut splited_t: Array> = array![]; @@ -100,23 +82,18 @@ fn split_num_outputs< if (*(*t).shape.at(axis) % num_outputs == 0) { div = *(*t).shape.at(axis) / num_outputs; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; } else { div = *(*t).shape.at(axis) / num_outputs + 1; let mut i = 0; - loop { - if (i >= num_outputs) { - break; - } + while i != num_outputs { split.append(div); i += 1; }; + match split.pop_front() { Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); @@ -128,34 +105,29 @@ let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.len()) { - break; - } + while i != split.len() { let spl = *split.at(i); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, + Option::Some(res) => res, Option::None => { assert(false, 'Get end_ele_1 is failed'); 0 @@ -169,37 +141,28 @@ splited_t.append(sub_t); i += 1; }; + splited_t } /// Subfunction split for tensors (with split). /// Cf: TensorTrait::split docstring -fn split_has_split< - T, - +Copy, - +Drop, - +TensorTrait, ->( +fn split_has_split, +Drop, +TensorTrait,>( t: @Tensor, axis: usize, split: Tensor ) -> Array> { let mut splited_t: Array> = array![]; let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - loop { - if (i >= (*t).shape.len()) { - break; - } + while i != (*t).shape.len() { let s: usize = *(*t).shape.at(i); sli.set(i, 0, 0); sli.set(i, 1, s); i += 1; }; + let mut i: usize = 0; - loop { - if (i >= split.data.len()) { - break; - } + while i != split.data.len() { let spl: usize = split.at(indices: array![i].span()); sli.set(axis, 0, pos); pos += spl; sli.set(axis, 1, pos); @@ -227,5 +190,6 @@ splited_t.append(sub_t); i += 1; }; + splited_t } diff --git a/src/operators/tensor/manipulation/unique.cairo b/src/operators/tensor/manipulation/unique.cairo index d90b9b8cd..6ace4b2aa 100644 --- a/src/operators/tensor/manipulation/unique.cairo +++ b/src/operators/tensor/manipulation/unique.cairo @@ -1,10 +1,3 @@ -use core::traits::Into; -use core::traits::IndexView; -use core::option::OptionTrait; -use core::array::{SpanTrait, ArrayTrait}; - -use core::debug::PrintTrait; - use alexandria_data_structures::array_ext::{SpanTraitExt, ArrayTraitExt}; use alexandria_sorting::merge_sort::merge; @@ -87,13 +80,7 @@ fn unique_flatten, +Drop, +PartialOrd, +PartialEq,>( } }; - return ( - unique_elements.span(), - new_shape.span(), - indices.span(), - inverse_indices.span(), - count.span() - ); + (unique_elements.span(), new_shape.span(), indices.span(), inverse_indices.span(), count.span()) } /// Subfunction unique for tensors (with axis). 
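The unique_flatten refactor above returns the same five-tuple as numpy.unique: values, shape, first-occurrence indices, inverse indices, and counts. As a reading aid before the along-axis variant that follows, here is a minimal sketch of how values and counts fall out of a single pass once the input is sorted (the module imports alexandria's merge sort for exactly this); the helper name and the u32 element type are illustrative, not library API:

// Accumulate unique values and their counts from a non-decreasing span.
// Illustrative only: the library version also tracks `indices` and
// `inverse_indices` so the original tensor can be reconstructed.
fn unique_counts_sketch(mut sorted: Span<u32>) -> (Array<u32>, Array<usize>) {
    let mut uniques: Array<u32> = array![];
    let mut counts: Array<usize> = array![];
    if sorted.len() == 0 {
        return (uniques, counts);
    }
    let mut current: u32 = *sorted.pop_front().unwrap();
    let mut count: usize = 1;
    loop {
        match sorted.pop_front() {
            Option::Some(v) => {
                if *v == current {
                    count += 1;
                } else {
                    // value changed: flush the finished run
                    uniques.append(current);
                    counts.append(count);
                    current = *v;
                    count = 1;
                }
            },
            Option::None => { break; },
        };
    };
    uniques.append(current);
    counts.append(count);
    (uniques, counts)
}

Sorting first is what keeps the scan single-pass; on [1, 1, 2, 5, 5, 5] the sketch yields uniques [1, 2, 5] with counts [2, 1, 3].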
@@ -123,10 +110,7 @@ fn unique_along_axis< let mut unique_tensors_len = unique_tensors.len(); let mut i = 0; - loop { - if (i >= rank) { - break; - } + while i != rank { new_shape.append(if axis == i { unique_tensors_len } else { @@ -166,5 +150,5 @@ fn unique_along_axis< let new_shape_span = new_shape.span(); let unique_elements = flatten_array_of_tensors(unique_tensors, axis, new_shape_span); - return (unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span()); + (unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span()) } diff --git a/src/operators/tensor/math/abs.cairo b/src/operators/tensor/math/abs.cairo index 129e05b40..e129e94ea 100644 --- a/src/operators/tensor/math/abs.cairo +++ b/src/operators/tensor/math/abs.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -16,7 +12,7 @@ fn abs< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).abs()); }, @@ -24,5 +20,5 @@ fn abs< }; }; - return TensorTrait::::new(z.shape, data_result.span()); + TensorTrait::::new(z.shape, data_result.span()) } diff --git a/src/operators/tensor/math/acos.cairo b/src/operators/tensor/math/acos.cairo index 477f11450..799f87994 100644 --- a/src/operators/tensor/math/acos.cairo +++ b/src/operators/tensor/math/acos.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn acos< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).acos()); }, @@ -25,6 +21,6 @@ fn acos< }; }; - return TensorTrait::::new(self.shape, result.span()); + TensorTrait::::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/acosh.cairo b/src/operators/tensor/math/acosh.cairo index c9d159ca0..41717adab 100644 --- a/src/operators/tensor/math/acosh.cairo +++ b/src/operators/tensor/math/acosh.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -18,7 +13,7 @@ fn acosh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +22,5 @@ fn acosh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/and.cairo b/src/operators/tensor/math/and.cairo index 07e4c9443..0b1369f35 100644 --- a/src/operators/tensor/math/and.cairo +++ b/src/operators/tensor/math/and.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; use orion::operators::tensor::helpers::{ @@ -11,12 +7,12 @@ use orion::operators::tensor::helpers::{ /// Cf: 
TensorTrait::and docstring fn and(y: @Tensor, z: @Tensor) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -25,10 +21,7 @@ fn and(y: @Tensor, z: @Tensor) -> Tensor { result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/argmax.cairo b/src/operators/tensor/math/argmax.cairo index d4b54f9ae..f16c99b5c 100644 --- a/src/operators/tensor/math/argmax.cairo +++ b/src/operators/tensor/math/argmax.cairo @@ -1,8 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::numbers::NumberTrait; @@ -36,7 +31,7 @@ fn argmax< return find_argmax_1D::(*self, axis, true, select_last_index); } - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); @@ -44,21 +39,16 @@ fn argmax< let MIN = NumberTrait::min_value(); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_argmax = find_argmax(self, output_indices, axis, 0, MIN, 0, select_last_index); output_data.append(current_argmax); index += 1; - if index == output_data_len { - break (); - }; }; - return TensorTrait::< - usize - >::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()); + TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) } /// Helper function that finds the index of the maximum value in a flat tensor. 
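Both argmax above and argmin below share the same scan; these hunks only replace the manual loop/break bookkeeping with a while condition, leaving the reduction untouched. For reference, a 1-D sketch of that scan, fixed to u32 and first-occurrence semantics (the operator itself is generic over T and also handles keepdims and select_last_index):

// 1-D argmax sketch: index of the first maximum in a non-empty span.
fn argmax_1d_sketch(data: Span<u32>) -> usize {
    assert(data.len() != 0, 'input must not be empty');
    let mut best_index: usize = 0;
    let mut best_value: u32 = *data.at(0);
    let mut i: usize = 1;
    while i != data.len() {
        if *data.at(i) > best_value {
            best_value = *data.at(i);
            best_index = i;
        }
        i += 1;
    };
    best_index
}

The strict > comparison is what yields first-occurrence behaviour; select_last_index effectively relaxes it so later ties win, and argmin is the same scan with the comparison flipped.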
diff --git a/src/operators/tensor/math/argmin.cairo b/src/operators/tensor/math/argmin.cairo index 51502fd52..53087421d 100644 --- a/src/operators/tensor/math/argmin.cairo +++ b/src/operators/tensor/math/argmin.cairo @@ -1,8 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, combine_indices, len_from_shape}; use orion::numbers::NumberTrait; @@ -36,7 +31,7 @@ fn argmin< return find_argmin_1D(*self, axis, true, select_last_index); } - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); @@ -44,21 +39,16 @@ fn argmin< let MAX = NumberTrait::max_value(); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_argmin = find_argmin(self, output_indices, axis, 0, MAX, 0, select_last_index); output_data.append(current_argmin); index += 1; - if index == output_data_len { - break (); - }; }; - return TensorTrait::< - usize - >::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()); + TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) } diff --git a/src/operators/tensor/math/arithmetic.cairo b/src/operators/tensor/math/arithmetic.cairo index fdbbb7863..e744b29f9 100644 --- a/src/operators/tensor/math/arithmetic.cairo +++ b/src/operators/tensor/math/arithmetic.cairo @@ -1,13 +1,6 @@ -use core::option::OptionTrait; -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; - -use orion::operators::tensor::helpers::broadcast_shape; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index,}; -use orion::operators::tensor::helpers::{broadcast_index_mapping, len_from_shape,}; +use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape,}; use orion::utils::saturate; fn add< @@ -16,12 +9,12 @@ fn add< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -30,12 +23,9 @@ fn add< result.append(*(*self.data)[indices_self] + *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn add_by_scalar< @@ -55,7 +45,7 @@ fn add_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele + val); }, @@ -63,7 +53,7 @@ fn add_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_add< @@ -81,12 +71,12 @@ fn saturated_add< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = 
broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -104,12 +94,9 @@ fn saturated_add< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn sub< @@ -118,12 +105,12 @@ fn sub< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -132,12 +119,9 @@ fn sub< result.append(*(*self.data)[indices_self] - *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn sub_by_scalar< @@ -157,7 +141,7 @@ fn sub_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele - val); }, @@ -165,7 +149,7 @@ fn sub_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_sub< @@ -183,12 +167,12 @@ fn saturated_sub< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -206,12 +190,9 @@ fn saturated_sub< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn mul< @@ -220,12 +201,12 @@ fn mul< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -234,12 +215,9 @@ fn mul< result.append(*(*self.data)[indices_self] * *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn mul_by_scalar< @@ -259,7 +237,7 @@ fn mul_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele * val); }, @@ -267,7 +245,7 @@ fn mul_by_scalar< 
}; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_mul< @@ -285,12 +263,12 @@ fn saturated_mul< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -308,12 +286,9 @@ fn saturated_mul< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div< @@ -322,12 +297,12 @@ fn div< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -336,12 +311,9 @@ fn div< result.append(*(*self.data)[indices_self] / *(*other.data)[indices_other]); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div_by_scalar< @@ -361,7 +333,7 @@ fn div_by_scalar< } let mut input_data = *self.data; - let mut data_result = ArrayTrait::::new(); + let mut data_result = array![]; loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele / val); }, @@ -369,7 +341,7 @@ fn div_by_scalar< }; }; - return TensorTrait::::new(*self.shape, data_result.span()); + TensorTrait::::new(*self.shape, data_result.span()) } fn saturated_div< @@ -387,12 +359,12 @@ fn saturated_div< self: @Tensor, other: @Tensor, min_saturation: T, max_saturation: T ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -410,12 +382,9 @@ fn saturated_div< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } fn div_downcast< @@ -433,12 +402,12 @@ fn div_downcast< self: @Tensor, other: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*self.shape, *other.shape); - let mut result = ArrayTrait::new(); + let mut result = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -451,10 +420,7 @@ fn div_downcast< ); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/asin.cairo 
b/src/operators/tensor/math/asin.cairo index 60c440d8d..49a00ae19 100644 --- a/src/operators/tensor/math/asin.cairo +++ b/src/operators/tensor/math/asin.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -17,7 +13,7 @@ fn asin< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -26,5 +22,5 @@ fn asin< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/asinh.cairo b/src/operators/tensor/math/asinh.cairo index b94efa9a4..6e9f06a3b 100644 --- a/src/operators/tensor/math/asinh.cairo +++ b/src/operators/tensor/math/asinh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::asinh docstring fn asinh< T, @@ -19,7 +13,7 @@ fn asinh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,6 +22,6 @@ fn asinh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/atan.cairo b/src/operators/tensor/math/atan.cairo index f08271c0c..9d32a6ead 100644 --- a/src/operators/tensor/math/atan.cairo +++ b/src/operators/tensor/math/atan.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn atan< T, MAG, @@ -18,7 +12,7 @@ fn atan< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +21,5 @@ fn atan< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/binarizer.cairo b/src/operators/tensor/math/binarizer.cairo index 0a02bc91b..0b66a4d4e 100644 --- a/src/operators/tensor/math/binarizer.cairo +++ b/src/operators/tensor/math/binarizer.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -23,7 +19,7 @@ fn binarizer< NumberTrait::zero() }; - let mut binarized_data = ArrayTrait::::new(); + let mut binarized_data: Array = array![]; loop { match self.data.pop_front() { @@ -38,5 +34,5 @@ fn binarizer< }; }; - return TensorTrait::new(self.shape, binarized_data.span()); + TensorTrait::new(self.shape, binarized_data.span()) } diff --git a/src/operators/tensor/math/bitwise_and.cairo b/src/operators/tensor/math/bitwise_and.cairo index e3487568b..f7e013218 100644 --- a/src/operators/tensor/math/bitwise_and.cairo +++ b/src/operators/tensor/math/bitwise_and.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use 
orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_and< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -40,10 +35,7 @@ fn bitwise_and< // result.append(res); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/bitwise_or.cairo b/src/operators/tensor/math/bitwise_or.cairo index 8869422d9..eaef9f492 100644 --- a/src/operators/tensor/math/bitwise_or.cairo +++ b/src/operators/tensor/math/bitwise_or.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_or< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +33,7 @@ fn bitwise_or< result.append(NumberTrait::bitwise_or(lhs, rhs)); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/bitwise_xor.cairo b/src/operators/tensor/math/bitwise_xor.cairo index 934fa750f..a547465c0 100644 --- a/src/operators/tensor/math/bitwise_xor.cairo +++ b/src/operators/tensor/math/bitwise_xor.cairo @@ -1,8 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +16,12 @@ fn bitwise_xor< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +33,7 @@ fn bitwise_xor< result.append(NumberTrait::bitwise_xor(lhs, rhs)); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::::new(broadcasted_shape, result.span()); + TensorTrait::::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/blackman_window.cairo b/src/operators/tensor/math/blackman_window.cairo index 29f4d2903..217d03903 100644 --- a/src/operators/tensor/math/blackman_window.cairo +++ 
b/src/operators/tensor/math/blackman_window.cairo @@ -1,15 +1,6 @@ -use core::traits::Into; -use core::traits::TryInto; -use orion::operators::tensor::core::{Tensor, TensorTrait}; -use core::array::{ArrayTrait, SpanTrait}; -use core::option::OptionTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; - -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; - +use orion::operators::tensor::core::{Tensor, TensorTrait}; fn blackman_window< T, @@ -25,7 +16,9 @@ fn blackman_window< impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, ->(size: T, PI: T, periodic: Option) -> Tensor { +>( + size: T, PI: T, periodic: Option +) -> Tensor { let start: T = NumberTrait::zero(); let one_step: T = NumberTrait::one(); let two: T = one_step + one_step; @@ -36,74 +29,66 @@ fn blackman_window< let n_0_5: T = (one_step - two) / two; let ni = TensorTrait::range(start, size, one_step); - assert!((ni.shape).len() == 1, "Unexpected shape 1."); + assert((ni.shape).len() == 1, 'Unexpected shape 1.'); let mut N_1 = size; + if periodic != Option::Some(1) { N_1 = N_1 - one_step; }; + let len = *(ni.shape).at(0); - let mut arr1: Array = ArrayTrait::::new(); + let mut arr1: Array = array![]; let mut i: usize = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = (v * (PI * two)) / N_1; arr1.append(r); i += 1; - if i >= len { - break (); - }; }; + let window_cos = TensorTrait::::new(ni.shape, arr1.span()).cos(); i = 0; - let mut a1: Array = ArrayTrait::::new(); - loop { + let mut a1: Array = array![]; + while i != len { let v = *(window_cos.data).at(i); let r = v * n_0_5; a1.append(r); i += 1; - if i >= len { - break (); - }; }; + let window1 = TensorTrait::::new(ni.shape, a1.span()); - let mut arr2: Array = ArrayTrait::::new(); + let mut arr2: Array = array![]; i = 0; - loop { + while i != len { let v = *(ni.data).at(i); let r = v * (PI * two * two) / N_1; arr2.append(r); i += 1; - if i >= len { - break (); - }; }; + let window_cos_2 = TensorTrait::::new(ni.shape, arr2.span()).cos(); - let mut a2: Array = ArrayTrait::::new(); + let mut a2: Array = array![]; i = 0; - loop { + while i != len { let v = *(window_cos_2.data).at(i); let r = v * beta + alpha; a2.append(r); i += 1; - if i >= len { - break (); - }; }; + let window2 = TensorTrait::::new(ni.shape, a2.span()); - let mut arr: Array = ArrayTrait::::new(); + let mut arr: Array = array![]; i = 0; - loop { + while i != len { let v1 = *(window1.data).at(i); let v2 = *(window2.data).at(i); let r = v1 + v2; arr.append(r); i += 1; - if i >= len { - break (); - }; }; - return TensorTrait::::new(ni.shape, arr.span()); + + TensorTrait::::new(ni.shape, arr.span()) } diff --git a/src/operators/tensor/math/ceil.cairo b/src/operators/tensor/math/ceil.cairo index b6448b11d..8ee604ab3 100644 --- a/src/operators/tensor/math/ceil.cairo +++ b/src/operators/tensor/math/ceil.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -16,7 +12,7 @@ fn ceil< >( mut z: Tensor ) -> Tensor { - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match z.data.pop_front() { @@ -25,6 +21,6 @@ fn ceil< }; }; - return TensorTrait::new(z.shape, data_result.span()); + TensorTrait::new(z.shape, 
 }
diff --git a/src/operators/tensor/math/compress.cairo b/src/operators/tensor/math/compress.cairo
index 80a4f7648..86793b187 100644
--- a/src/operators/tensor/math/compress.cairo
+++ b/src/operators/tensor/math/compress.cairo
@@ -1,13 +1,4 @@
 use alexandria_data_structures::array_ext::SpanTraitExt;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
 
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::U32TensorPartialEq;
@@ -33,9 +24,9 @@ fn compress<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDro
         assert(*data_shape.at(axis) >= condition.data.len(), 'index out of bound');
     }
 
-    let mut output_shape = ArrayTrait::new();
-    let mut index_data = ArrayTrait::new();
-    let mut output_data = ArrayTrait::new();
+    let mut output_shape = array![];
+    let mut index_data = array![];
+    let mut output_data = array![];
 
     let mut condition_data = condition.data;
@@ -153,5 +144,6 @@ fn compress<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDro
     }
 
     let mut output_tensor = TensorTrait::<T>::new(output_shape.span(), output_data.span());
-    return output_tensor;
+
+    output_tensor
 }
diff --git a/src/operators/tensor/math/concat.cairo b/src/operators/tensor/math/concat.cairo
index 1826d8d69..381aa7b5a 100644
--- a/src/operators/tensor/math/concat.cairo
+++ b/src/operators/tensor/math/concat.cairo
@@ -1,13 +1,6 @@
-use core::clone::Clone;
-use core::array::{ArrayTrait, SpanTrait};
-use core::option::OptionTrait;
-use core::debug::PrintTrait;
-use core::traits::Into;
-
 use orion::operators::tensor::helpers::replace_index;
 use orion::operators::tensor::{TensorTrait, Tensor};
 
-
 fn concat<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
     mut tensors: Span<Tensor<T>>, axis: usize
 ) -> Tensor<T> {
@@ -59,7 +52,7 @@ fn validate_shapes<T>(mut tensors: Span<Tensor<T>>, mut base_shape: Span<usize>,
 fn compute_output_size<T>(
     mut base_shape: Span<usize>, mut tensors: Span<Tensor<T>>, axis: usize
 ) -> Array<usize> {
-    let mut output_size = ArrayTrait::<usize>::new();
+    let mut output_size: Array<usize> = array![];
 
     let mut axis_size = 0;
     loop {
@@ -90,16 +83,12 @@ fn concatenate_data<T, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
     mut tensors: Span<Tensor<T>>, axis: usize, base_shape: Span<usize>
 ) -> Array<T> {
-    let mut output_data = ArrayTrait::<T>::new();
+    let mut output_data: Array<T> = array![];
 
     let total_loops = product_upto(base_shape, axis);
 
     let mut outer_loop_index = 0;
-    loop {
-        if outer_loop_index == total_loops {
-            break;
-        }
-
+    while outer_loop_index != total_loops {
         let mut tensors_copy = tensors;
         loop {
             match tensors_copy.pop_front() {
                 Option::Some(tensor) => {
                     let slice_len = (*tensor.data).len() / total_loops;
 
                     let mut inner_index = 0;
-                    loop {
-                        if inner_index == slice_len {
-                            break;
-                        }
-
+                    while inner_index != slice_len {
                         output_data
                             .append(*(*tensor.data).at(slice_len * outer_loop_index + inner_index));
                         inner_index += 1;
diff --git a/src/operators/tensor/math/cos.cairo b/src/operators/tensor/math/cos.cairo
index 943b6528b..c37e95618 100644
--- a/src/operators/tensor/math/cos.cairo
+++ b/src/operators/tensor/math/cos.cairo
@@ -1,13 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::Into;
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 /// Cf: TensorTrait::cos docstring
 fn cos<
     T,
@@ -19,7 +13,7 @@ fn cos<
 >(
     mut self: Tensor<T>
 ) -> Tensor<T> {
-    let mut result = ArrayTrait::new();
+    let mut result = array![];
 
     loop {
         match self.data.pop_front() {
@@ -28,5 +22,5 @@ fn cos<
         };
     };
 
-    return TensorTrait::new(self.shape, result.span());
+    TensorTrait::new(self.shape, result.span())
 }
diff --git a/src/operators/tensor/math/cosh.cairo b/src/operators/tensor/math/cosh.cairo
index df8a7b40c..2133e3e8e 100644
--- a/src/operators/tensor/math/cosh.cairo
+++ b/src/operators/tensor/math/cosh.cairo
@@ -1,13 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::Into;
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 /// Cf: TensorTrait::cosh docstring
 fn cosh<
     T,
@@ -19,7 +13,7 @@ fn cosh<
 >(
     mut self: Tensor<T>
 ) -> Tensor<T> {
-    let mut result = ArrayTrait::new();
+    let mut result = array![];
 
     loop {
         match self.data.pop_front() {
@@ -28,5 +22,5 @@ fn cosh<
         };
     };
 
-    return TensorTrait::new(self.shape, result.span());
+    TensorTrait::new(self.shape, result.span())
 }
diff --git a/src/operators/tensor/math/cumsum.cairo b/src/operators/tensor/math/cumsum.cairo
index 99aea3156..6fef885d2 100644
--- a/src/operators/tensor/math/cumsum.cairo
+++ b/src/operators/tensor/math/cumsum.cairo
@@ -1,11 +1,6 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::debug::PrintTrait;
-
+use orion::numbers::NumberTrait;
 use orion::operators::tensor::helpers::replace_index;
 use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
-use orion::numbers::NumberTrait;
 
 /// Cf: TensorTrait::cumsum docstring
 fn cumsum<
@@ -52,17 +47,14 @@ fn cumsum_forward<
     let data = *self.data;
 
-    let mut output_data = ArrayTrait::new();
+    let mut output_data = array![];
 
     let mut index: usize = 0;
-    loop {
-        if index == data.len() {
-            break ();
-        };
-
+    while index != data.len() {
         let current_indices = unravel_index(index, *self.shape);
         let axis_value = *current_indices[axis];
+
         if axis_value == 0 {
             if exclusive {
                 output_data.append(zero);
@@ -91,10 +83,9 @@
         index += 1;
     };
 
-    return TensorTrait::<T>::new(*self.shape, output_data.span());
+    TensorTrait::<T>::new(*self.shape, output_data.span())
 }
 
-
 /// Cf: TensorTrait::cumsum docstring
 fn cumsum_reverse<
     T,
@@ -113,20 +104,15 @@ fn cumsum_reverse<
     assert(axis < (*self.shape).len(), 'axis out of dimensions');
 
     let data = *self.data;
-    let mut output_data = ArrayTrait::new();
+    let mut output_data = array![];
 
     let mut index: usize = 0;
-    loop {
-        if index == data.len() {
-            break ();
-        };
-
+    while index != data.len() {
         let current_indices = unravel_index(index, *self.shape);
         let mut axis_value = *current_indices[axis];
 
         if axis_value == 0 {
             // If the axis value is 0, we need to sum all the elements
             // in the axis.
-
             let mut sum = *(data)[index];
             if exclusive {
                 sum = zero;
             }
@@ -144,6 +130,7 @@ fn cumsum_reverse<
                 let next_axis_element_index = ravel_index(*self.shape, next_axis_element_indices);
                 sum += *data[next_axis_element_index];
             };
+
             output_data.append(sum);
         } else {
             // If the axis value is not 0, we only need to do a subtraction
@@ -168,5 +155,5 @@ fn cumsum_reverse<
         index += 1;
     };
 
-    return TensorTrait::<T>::new(*self.shape, output_data.span());
+    TensorTrait::<T>::new(*self.shape, output_data.span())
 }
diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo
index e3f884acd..d2693acf9 100644
--- a/src/operators/tensor/math/equal.cairo
+++ b/src/operators/tensor/math/equal.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn equal<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -36,10 +32,7 @@ fn equal<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/erf.cairo b/src/operators/tensor/math/erf.cairo
index 8cc8ab055..545ff789d 100644
--- a/src/operators/tensor/math/erf.cairo
+++ b/src/operators/tensor/math/erf.cairo
@@ -1,11 +1,6 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::fixed_point::core::FixedTrait;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::numbers::NumberTrait;
-
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 /// Cf: TensorTrait::erf docstring
 fn erf<
@@ -18,7 +13,7 @@ fn erf<
 >(
     mut z: Tensor<T>
 ) -> Tensor<T> {
-    let mut data_result = ArrayTrait::<T>::new();
+    let mut data_result: Array<T> = array![];
 
     loop {
         match z.data.pop_front() {
@@ -27,5 +22,5 @@ fn erf<
         };
     };
 
-    return TensorTrait::<T>::new(z.shape, data_result.span());
+    TensorTrait::<T>::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/exp.cairo b/src/operators/tensor/math/exp.cairo
index 0c1700abf..c3e74168b 100644
--- a/src/operators/tensor/math/exp.cairo
+++ b/src/operators/tensor/math/exp.cairo
@@ -1,13 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::{Into, TryInto};
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 /// Cf: TensorTrait::exp docstring
 fn exp<
     T,
@@ -19,7 +13,7 @@
 >(
     mut self: Tensor<T>
 ) -> Tensor<T> {
-    let mut result = ArrayTrait::new();
+    let mut result = array![];
 
     loop {
         match self.data.pop_front() {
@@ -28,7 +22,7 @@ fn exp<
         };
     };
 
-    return TensorTrait::new(self.shape, result.span());
+    TensorTrait::new(self.shape, result.span())
 }
 
 /// Cf: TensorTrait::exp docstring
fn exp_upcast<
@@ -49,7 +43,7 @@ fn exp_upcast<
 >(
     mut self: Tensor<T>
 ) -> Tensor<W> {
-    let mut result = ArrayTrait::new();
+    let mut result = array![];
 
     loop {
         match self.data.pop_front() {
@@ -58,5 +52,5 @@ fn exp_upcast<
         };
     };
 
-    return TensorTrait::new(self.shape, result.span());
+    TensorTrait::new(self.shape, result.span())
 }
diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo
index d8e5b5583..a23671b77 100644
--- a/src/operators/tensor/math/flatten.cairo
+++ b/src/operators/tensor/math/flatten.cairo
@@ -1,9 +1,5 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 /// Cf: TensorTrait::flatten docstring
 fn flatten<T, impl TTensorTrait: TensorTrait<T>>(self: @Tensor<T>, axis: usize) -> Tensor<T> {
     let mut shape = *self.shape;
@@ -27,5 +23,5 @@ fn flatten<T, impl TTensorTrait: TensorTrait<T>>(self: @Tensor<T>, axis: usize)
 
     let new_shape_second_axis = (*self.data).len() / new_shape_first_axis;
 
-    return self.reshape(array![new_shape_first_axis, new_shape_second_axis].span());
+    self.reshape(array![new_shape_first_axis, new_shape_second_axis].span())
 }
diff --git a/src/operators/tensor/math/gather.cairo b/src/operators/tensor/math/gather.cairo
index 93662868b..a60e927ab 100644
--- a/src/operators/tensor/math/gather.cairo
+++ b/src/operators/tensor/math/gather.cairo
@@ -1,13 +1,4 @@
 use alexandria_data_structures::array_ext::SpanTraitExt;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
-use core::option::OptionTrait;
 
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::{TensorTrait, Tensor};
@@ -26,14 +17,14 @@ fn gather<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop:
     let ind_max = indices.data.max().unwrap();
     assert(ind_max < axis_shape, 'this index out of bounds');
 
-    let mut output_data = ArrayTrait::new();
-    let mut output_size = ArrayTrait::new();
+    let mut output_data = array![];
+    let mut output_size = array![];
 
     let mut self_shape = *self.shape;
     let mut i: usize = 0;
     loop {
         match self_shape.pop_front() {
             Option::Some(val) => {
-                if (i == axis) {
+                if i == axis {
                     let mut indices_shape = indices.shape;
                     loop {
                         match indices_shape.pop_front() {
@@ -44,6 +35,7 @@
                 } else {
                     output_size.append(*val);
                 }
+
                 i += 1;
             },
             Option::None => { break; }
@@ -58,10 +50,11 @@
     loop {
         match self_shape.pop_front() {
             Option::Some(val) => {
-                if (i == axis) {
+                if i == axis {
                     divisor /= *val;
                     break ();
                 };
+
                 outer_loop_break *= *val;
                 divisor /= *val;
                 i += 1;
@@ -86,25 +79,18 @@
     let mut outer_loop: usize = 0;
     let axis_index = *self.shape[axis];
 
-    loop {
-        if outer_loop == outer_loop_break {
-            break;
-        }
-
+    while outer_loop != outer_loop_break {
         let mut data_indices = indices.data;
         loop {
             match data_indices.pop_front() {
                 Option::Some(indice) => {
                     let mut inner_loop = 0;
-                    loop {
-                        if inner_loop == break_loop {
-                            break;
-                        }
-
+                    while inner_loop != break_loop {
                         let new_val = inner_loop / divisor % axis_index;
                         if *indice == new_val {
                             output_data.append(*self.data[break_loop * outer_loop + inner_loop]);
                         }
+
                         inner_loop += 1;
                     }
                 },
@@ -117,5 +103,5 @@
 
     let mut output_tensor = TensorTrait::<T>::new(output_size.span(), output_data.span());
 
-    return output_tensor;
+    output_tensor
 }
diff --git a/src/operators/tensor/math/gather_elements.cairo b/src/operators/tensor/math/gather_elements.cairo
index f34e3e6b3..c3793a316 100644
--- a/src/operators/tensor/math/gather_elements.cairo
+++ b/src/operators/tensor/math/gather_elements.cairo
@@ -1,13 +1,4 @@
 use alexandria_data_structures::array_ext::SpanTraitExt;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
-use core::option::OptionTrait;
 
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::U32TensorPartialEq;
@@ -48,7 +39,7 @@ fn gather_elements<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, im
         };
     };
 
-    let mut output_data = ArrayTrait::new();
+    let mut output_data = array![];
 
     let mut outer_loop = data_shape_clone.at(axis);
     let mut inner_loop = 1;
@@ -61,6 +52,7 @@
                     if (ind >= axis) {
                         multiplier *= *val;
                     }
+
                     ind += 1;
                 },
                 Option::None => { break; }
@@ -82,6 +74,7 @@
                     if (ind >= axis) {
                         multiplier_index *= *val;
                     }
+
                     ind += 1;
                 },
                 Option::None => { break; }
@@ -97,16 +90,19 @@
                     let value = *val * inner_loop.into() + (i % inner_loop);
                     output_data.append(*self.data[value]);
                 }
+
                 if ((axis == indices_rank - 1) & (axis != 0)) {
                     let value = *val + *outer_loop * (i / *outer_loop_index);
                     output_data.append(*self.data[value]);
                 }
+
                 if ((axis != indices_rank - 1) & (axis != 0)) {
                     let value = *val * (looper)
                         + (i % looper)
                         + (multiplier * (i / multiplier_index));
                     output_data.append(*self.data[value]);
                 }
+
                 i += 1;
             },
             Option::None => { break; }
@@ -114,5 +110,6 @@
     };
 
     let mut output_tensor = TensorTrait::<T>::new(indices.shape, output_data.span());
-    return output_tensor;
+
+    output_tensor
 }
diff --git a/src/operators/tensor/math/gather_nd.cairo b/src/operators/tensor/math/gather_nd.cairo
index 5d6c75ce1..e5f340487 100644
--- a/src/operators/tensor/math/gather_nd.cairo
+++ b/src/operators/tensor/math/gather_nd.cairo
@@ -1,13 +1,4 @@
 use alexandria_data_structures::array_ext::SpanTraitExt;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
 
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::U32TensorPartialEq;
@@ -37,20 +28,17 @@ fn gather_nd<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDr
         'check indices'
     );
 
-    let mut batch_dims_shape = ArrayTrait::new();
-    let mut output_shape = ArrayTrait::new();
-    let mut index_data = ArrayTrait::new();
-    let mut output_data = ArrayTrait::new();
+    let mut batch_dims_shape = array![];
+    let mut output_shape = array![];
+    let mut index_data = array![];
+    let mut output_data = array![];
 
     let mut batch_dims_size = batch_dims;
     let mut total_data_len = 1;
-    let mut multiple_data_len = ArrayTrait::new();
+    let mut multiple_data_len = array![];
 
     let mut ind = 0;
-    loop {
-        if (ind == batch_dims) {
-            break ();
-        }
+    while ind != batch_dims {
         match indices_shape_clone.pop_front() {
             Option::Some(val) => {
                 batch_dims_size *= *val;
@@ -79,6 +67,7 @@
                 if (ind >= (batch_dims + *indices_shape_last)) {
                     output_shape.append(*val);
                 }
+
                 ind += 1;
             },
             Option::None => { break; }
@@ -101,6 +90,7 @@
                 if (ind >= batch_dims + *indices_shape_last) {
                     incrementer *= *val;
                 }
+
                 ind += 1;
             },
             Option::None => { break; }
@@ -116,6 +106,7 @@
                 if (ind >= batch_dims) {
                     breaker *= *val;
                 }
+
                 ind += 1;
             },
             Option::None => { break; }
@@ -136,13 +127,11 @@
                     if (index == *indices_shape_last - 1) {
                         let mut data_ind: usize = result;
-                        loop {
-                            if data_ind == result + incrementer {
-                                break;
-                            }
+                        while data_ind != result + incrementer {
                             index_data.append(data_ind + incr);
                             data_ind += 1;
                         };
+
                         result = 0;
                     };
                 },
@@ -158,5 +147,6 @@
     };
 
     let mut output_tensor = TensorTrait::<T>::new(output_shape.span(), output_data.span());
-    return output_tensor;
+
+    output_tensor
 }
diff --git a/src/operators/tensor/math/greater.cairo b/src/operators/tensor/math/greater.cairo
index 1177212bc..f90462b22 100644
--- a/src/operators/tensor/math/greater.cairo
+++ b/src/operators/tensor/math/greater.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn greater<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -36,10 +32,7 @@ fn greater<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/greater_equal.cairo b/src/operators/tensor/math/greater_equal.cairo
index b7756d12e..bc8e1b045 100644
--- a/src/operators/tensor/math/greater_equal.cairo
+++ b/src/operators/tensor/math/greater_equal.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn greater_equal<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -36,10 +32,7 @@ fn greater_equal<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/hamming_window.cairo b/src/operators/tensor/math/hamming_window.cairo
index 216590f09..06539f0fe 100644
--- a/src/operators/tensor/math/hamming_window.cairo
+++ b/src/operators/tensor/math/hamming_window.cairo
@@ -1,15 +1,6 @@
-use core::traits::Into;
-use core::traits::TryInto;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
-use core::array::{ArrayTrait, SpanTrait};
-use core::option::OptionTrait;
-
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
-
-use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
-use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
-
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 fn hamming_window<
     T,
@@ -25,7 +16,9 @@
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(size: T, PI: T, periodic: Option<usize>) -> Tensor<T> {
+>(
+    size: T, PI: T, periodic: Option<usize>
+) -> Tensor<T> {
     let start: T = NumberTrait::zero();
     let one_step: T = NumberTrait::one();
     let two: T = one_step + one_step;
@@ -36,37 +29,36 @@
     let beta: T = one_step - alpha;
 
     let ni = TensorTrait::range(start, size, one_step);
-    assert!((ni.shape).len() == 1, "Unexpected shape 1.");
+    assert((ni.shape).len() == 1, 'Unexpected shape 1.');
 
     let mut N_1 = size;
+
     if periodic != Option::Some(1) {
         N_1 = N_1 - one_step;
     };
+
     let len = *(ni.shape).at(0);
-    let mut arr: Array<T> = ArrayTrait::<T>::new();
+    let mut arr: Array<T> = array![];
     let mut i: usize = 0;
-    loop {
+    while i != len {
         let v = *(ni.data).at(i);
         let r = v * PI * two / N_1;
         arr.append(r);
         i += 1;
-        if i >= len {
-            break ();
-        };
     };
+
     let window = TensorTrait::<T>::new(ni.shape, arr.span());
     let window_cos = window.cos();
     let len2 = *(ni.shape).at(0);
-    let mut arr2: Array<T> = ArrayTrait::<T>::new();
+    let mut arr2: Array<T> = array![];
     let mut j: usize = 0;
-    loop {
+    while j != len2 {
         let v = *(window_cos.data).at(j);
         let v_2 = alpha - v * beta;
         arr2.append(v_2);
         j += 1;
-        if j >= len2 {
-            break ();
-        };
     };
+
     let window_cos_2 = TensorTrait::<T>::new(ni.shape, arr2.span());
-    return window_cos_2;
+
+    window_cos_2
 }
diff --git a/src/operators/tensor/math/hann_window.cairo b/src/operators/tensor/math/hann_window.cairo
index 05aa3b923..4fc7a801f 100644
--- a/src/operators/tensor/math/hann_window.cairo
+++ b/src/operators/tensor/math/hann_window.cairo
@@ -1,15 +1,6 @@
-use core::traits::Into;
-use core::traits::TryInto;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
-use core::array::{ArrayTrait, SpanTrait};
-use core::option::OptionTrait;
-
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
-
-use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
-use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
-
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 fn hann_window<
@@ -25,41 +16,42 @@
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(size: T, PI: T, periodic: Option<usize>) -> Tensor<T> {
+>(
+    size: T, PI: T, periodic: Option<usize>
+) -> Tensor<T> {
     let start: T = NumberTrait::zero();
     let one_step: T = NumberTrait::one();
 
     let ni = TensorTrait::range(start, size, one_step);
-    assert!((ni.shape).len() == 1, "Unexpected shape 1.");
+    assert((ni.shape).len() == 1, 'Unexpected shape 1.');
 
     let mut N_1 = size;
+
     if periodic != Option::Some(1) {
         N_1 = N_1 - one_step;
     };
+
     let len = *(ni.shape).at(0);
-    let mut arr: Array<T> = ArrayTrait::<T>::new();
+    let mut arr: Array<T> = array![];
     let mut i: usize = 0;
-    loop {
+    while i != len {
         let v = *(ni.data).at(i);
         let r = v * PI / N_1;
         arr.append(r);
         i += 1;
-        if i >= len {
-            break ();
-        };
     };
+
     let window = TensorTrait::<T>::new(ni.shape, arr.span());
     let window_sin = window.sin();
     let len2 = *(ni.shape).at(0);
-    let mut arr2: Array<T> = ArrayTrait::<T>::new();
+    let mut arr2: Array<T> = array![];
     let mut j: usize = 0;
-    loop {
+    while j != len2 {
         let v = *(window_sin.data).at(j);
         let v_2 = v * v;
         arr2.append(v_2);
         j += 1;
-        if j >= len2 {
-            break ();
-        };
     };
+
     let window_sin_2 = TensorTrait::<T>::new(ni.shape, arr2.span());
-    return window_sin_2;
+
+    window_sin_2
 }
diff --git a/src/operators/tensor/math/is_inf.cairo b/src/operators/tensor/math/is_inf.cairo
index d3a5f8f4f..021b10732 100644
--- a/src/operators/tensor/math/is_inf.cairo
+++ b/src/operators/tensor/math/is_inf.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
@@ -47,7 +43,7 @@ fn is_inf<
         return is_neg_inf(x);
     }
 
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];
     let mut y: Span<T> = *x.data;
     loop {
         match y.pop_front() {
@@ -56,7 +52,7 @@ fn is_inf<
         };
     };
 
-    return TensorTrait::new(*x.shape, data_result.span());
+    TensorTrait::new(*x.shape, data_result.span())
 }
 
 /// Cf: TensorTrait::is_pos_inf docstring
@@ -70,7 +66,7 @@ fn is_pos_inf<
 >(
     x: @Tensor<T>
 ) -> Tensor<bool> {
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];
     let mut y: Span<T> = *x.data;
     loop {
         match y.pop_front() {
@@ -79,7 +75,7 @@ fn is_pos_inf<
         };
     };
 
-    return TensorTrait::new(*x.shape, data_result.span());
+    TensorTrait::new(*x.shape, data_result.span())
 }
 
 /// Cf: TensorTrait::is_neg_inf docstring
@@ -93,7 +89,7 @@ fn is_neg_inf<
 >(
     x: @Tensor<T>
 ) -> Tensor<bool> {
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];
     let mut y: Span<T> = *x.data;
     loop {
         match y.pop_front() {
@@ -102,5 +98,5 @@ fn is_neg_inf<
         };
     };
 
-    return TensorTrait::new(*x.shape, data_result.span());
+    TensorTrait::new(*x.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/is_nan.cairo b/src/operators/tensor/math/is_nan.cairo
index 817cf5f4d..2f1818a81 100644
--- a/src/operators/tensor/math/is_nan.cairo
+++ b/src/operators/tensor/math/is_nan.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
@@ -17,7 +13,7 @@ fn is_nan<
 >(
     x: @Tensor<T>
 ) -> Tensor<bool> {
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];
     let mut y: Span<T> = *x.data;
     loop {
         match y.pop_front() {
@@ -26,5 +22,5 @@ fn is_nan<
         };
     };
 
-    return TensorTrait::new(*x.shape, data_result.span());
+    TensorTrait::new(*x.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo
index 1417b7e2b..e61e826f5 100644
--- a/src/operators/tensor/math/layer_normalization.cairo
+++ b/src/operators/tensor/math/layer_normalization.cairo
@@ -1,17 +1,10 @@
-use core::traits::TryInto;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::Into;
 use orion::numbers::{NumberTrait, I32IntoU32};
+use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
 use orion::operators::tensor::{
     TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
 };
-use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
-use core::debug::PrintTrait;
 use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
 
-
 /// Cf: TensorTrait::layer_normalization docstring
 fn layer_normalization<
     T,
@@ -52,20 +45,15 @@ fn layer_normalization<
     };
 
     let unsqueezed_rank = X_rank - axis;
-    let mut reduction_shape = ArrayTrait::new();
+    let mut reduction_shape = array![];
     let mut i = 0;
-    loop {
-        if i == axis {
-            break;
-        }
+    while i != axis {
         reduction_shape.append(*(*self).shape.at(i));
         i += 1;
     };
+
     let mut i = 0;
-    loop {
-        if i == unsqueezed_rank {
-            break;
-        }
+    while i != unsqueezed_rank {
         reduction_shape.append(1);
         i += 1;
     };
@@ -73,34 +61,32 @@ fn layer_normalization<
     let mut row_number = 1;
     let mut col_number = 1;
     let mut i = 0;
-    loop {
-        if i == X_rank {
-            break;
-        }
+    while i != X_rank {
         if i < axis {
             row_number *= *(*self).shape.at(i);
         } else {
             col_number *= *(*self).shape.at(i);
         }
+
         i += 1;
     };
 
-    let mut shape_matrix = ArrayTrait::new();
+    let mut shape_matrix = array![];
     shape_matrix.append(row_number);
     shape_matrix.append(col_number);
 
     // Shape [1, 1] to multiply one element tensors with 2D matrices
-    let mut shape_one = ArrayTrait::new();
+    let mut shape_one = array![];
     shape_one.append(1);
     shape_one.append(1);
 
-    let mut col_number_tensor = ArrayTrait::new();
+    let mut col_number_tensor = array![];
     col_number_tensor.append(NumberTrait::new_unscaled(col_number.into(), false));
 
-    let mut epsilon_tensor = ArrayTrait::new();
+    let mut epsilon_tensor = array![];
     epsilon_tensor.append(epsilon);
 
-    let mut one_tensor = ArrayTrait::new();
+    let mut one_tensor = array![];
     one_tensor.append(NumberTrait::one());
 
     let x_mat = self.reshape(shape_matrix.span());
@@ -122,23 +108,19 @@ fn layer_normalization<
     let scale = if (*scale).shape.len() < (*self).shape.len() {
         // Append 1 in scale shape to make sure scale has a dimension compatible with Y for multiplication
-        let mut shape = ArrayTrait::new();
+        let mut shape = array![];
         let mut i = 0;
-        loop {
-            if i == (*self).shape.len() - (*scale).shape.len() {
-                break;
-            }
+        while i != (*self).shape.len() - (*scale).shape.len() {
             shape.append(1);
             i += 1;
         };
+
         let mut i = 0;
-        loop {
-            if i == (*scale).shape.len() {
-                break;
-            }
+        while i != (*scale).shape.len() {
             shape.append(*(*scale).shape.at(i));
             i += 1;
         };
+
         TensorTrait::new(shape.span(), (*scale).data)
     } else {
         *scale
@@ -150,23 +132,19 @@ fn layer_normalization<
         Option::Some(B) => {
             let B = if (*B).shape.len() < (*self).shape.len() {
                 // Append 1 in B shape to make sure B has a dimension compatible with Y for multiplication
-                let mut shape = ArrayTrait::new();
+                let mut shape = array![];
                 let mut i = 0;
-                loop {
-                    if i == (*self).shape.len() - (*B).shape.len() {
-                        break;
-                    }
+                while i != (*self).shape.len() - (*B).shape.len() {
                     shape.append(1);
                     i += 1;
                 };
+
                 let mut i = 0;
-                loop {
-                    if i == (*B).shape.len() {
-                        break;
-                    }
+                while i != (*B).shape.len() {
                     shape.append(*(*B).shape.at(i));
                     i += 1;
                 };
+
                 TensorTrait::new(shape.span(), (*B).data)
             } else {
                 *B
@@ -179,6 +157,6 @@ fn layer_normalization<
     let X_mean = TensorTrait::new(reduction_shape.span(), x_mean.data);
     let X_inv_std_dev = TensorTrait::new(reduction_shape.span(), inv_std_dev.data);
 
-    return (Y, X_mean, X_inv_std_dev);
+    (Y, X_mean, X_inv_std_dev)
 }
diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo
index b9cc0370f..35f9b4d73 100644
--- a/src/operators/tensor/math/less.cairo
+++ b/src/operators/tensor/math/less.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn less<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -36,10 +32,7 @@ fn less<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/less_equal.cairo b/src/operators/tensor/math/less_equal.cairo
index 48316ed97..8c982a09c 100644
--- a/src/operators/tensor/math/less_equal.cairo
+++ b/src/operators/tensor/math/less_equal.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
     broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
@@ -18,12 +14,12 @@ fn less_equal<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -36,10 +32,7 @@ fn less_equal<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/log.cairo b/src/operators/tensor/math/log.cairo
index fa153c61b..3b7ae0823 100644
--- a/src/operators/tensor/math/log.cairo
+++ b/src/operators/tensor/math/log.cairo
@@ -1,13 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-use core::traits::Into;
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 
-
 /// Cf: TensorTrait::log docstring
 fn log<
     T,
@@ -19,7 +13,7 @@ fn log<
 >(
     mut self: Tensor<T>
 ) -> Tensor<T> {
-    let mut result = ArrayTrait::new();
+    let mut result = array![];
 
     loop {
         match self.data.pop_front() {
@@ -28,5 +22,5 @@ fn log<
         };
     };
 
-    return TensorTrait::<T>::new(self.shape, result.span());
+    TensorTrait::<T>::new(self.shape, result.span())
 }
diff --git a/src/operators/tensor/math/max.cairo b/src/operators/tensor/math/max.cairo
index 245099179..3ce6d4919 100644
--- a/src/operators/tensor/math/max.cairo
+++ b/src/operators/tensor/math/max.cairo
@@ -1,5 +1,3 @@
-use core::array::{ArrayTrait, SpanTrait};
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
@@ -30,12 +28,8 @@ fn max<
 
     let mut tensor_counter: usize = 1;
 
-    loop {
-        if tensor_counter > tensors.len() - 1 {
-            break;
-        }
-
-        let mut new_max_data = ArrayTrait::<T>::new();
+    while tensor_counter != tensors.len() {
+        let mut new_max_data: Array<T> = array![];
 
         let mut current_tensor = *tensors.at(tensor_counter);
 
@@ -43,7 +37,7 @@ fn max<
         let num_elements = len_from_shape(broadcasted_shape);
 
         let mut n: usize = 0;
-        loop {
+        while n != num_elements {
             let mut indices_broadcasted = unravel_index(n, broadcasted_shape);
 
             let mut indices_self = broadcast_index_mapping(max_shape, indices_broadcasted);
@@ -57,9 +51,6 @@ fn max<
             new_max_data.append(max_value);
 
             n += 1;
-            if n == num_elements {
-                break ();
-            };
         };
 
         max_shape = broadcasted_shape;
@@ -67,5 +58,5 @@ fn max<
         tensor_counter += 1;
     };
 
-    return TensorTrait::<T>::new(max_shape, max_data);
+    TensorTrait::<T>::new(max_shape, max_data)
 }
diff --git a/src/operators/tensor/math/max_in_tensor.cairo b/src/operators/tensor/math/max_in_tensor.cairo
index f1aabdafb..8f1622813 100644
--- a/src/operators/tensor/math/max_in_tensor.cairo
+++ b/src/operators/tensor/math/max_in_tensor.cairo
@@ -1,6 +1,3 @@
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 
 /// Cf: TensorTrait::max_in_tensor docstring
@@ -28,5 +25,5 @@ fn max_in_tensor<
         };
     };
 
-    return max_value;
+    max_value
 }
diff --git a/src/operators/tensor/math/min.cairo b/src/operators/tensor/math/min.cairo
index a4ae655eb..2e7acadab 100644
--- a/src/operators/tensor/math/min.cairo
+++ b/src/operators/tensor/math/min.cairo
@@ -1,5 +1,3 @@
-use core::array::{ArrayTrait, SpanTrait};
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
@@ -30,12 +28,8 @@ fn min<
 
     let mut tensor_counter: usize = 1;
 
-    loop {
-        if tensor_counter > tensors.len() - 1 {
-            break;
-        }
-
-        let mut new_min_data = ArrayTrait::<T>::new();
+    while tensor_counter != tensors.len() {
+        let mut new_min_data: Array<T> = array![];
 
         let mut current_tensor = *tensors.at(tensor_counter);
 
@@ -43,7 +37,7 @@ fn min<
         let num_elements = len_from_shape(broadcasted_shape);
 
         let mut n: usize = 0;
-        loop {
+        while n != num_elements {
             let mut indices_broadcasted = unravel_index(n, broadcasted_shape);
 
             let mut indices_self = broadcast_index_mapping(min_shape, indices_broadcasted);
@@ -57,9 +51,6 @@ fn min<
             new_min_data.append(min_value);
 
             n += 1;
-            if n == num_elements {
-                break ();
-            };
         };
 
         min_shape = broadcasted_shape;
@@ -67,5 +58,5 @@ fn min<
         tensor_counter += 1;
     };
 
-    return TensorTrait::<T>::new(min_shape, min_data);
+    TensorTrait::<T>::new(min_shape, min_data)
 }
diff --git a/src/operators/tensor/math/min_in_tensor.cairo b/src/operators/tensor/math/min_in_tensor.cairo
index efa4356e5..854ac9a7f 100644
--- a/src/operators/tensor/math/min_in_tensor.cairo
+++ b/src/operators/tensor/math/min_in_tensor.cairo
@@ -1,6 +1,3 @@
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 
 /// Cf: TensorTrait::min_in_tensor docstring
@@ -28,5 +25,5 @@ fn min_in_tensor<
         };
     };
 
-    return min_value;
+    min_value
 }
diff --git a/src/operators/tensor/math/neg.cairo b/src/operators/tensor/math/neg.cairo
index 0eaa8b3da..71f358be0 100644
--- a/src/operators/tensor/math/neg.cairo
+++ b/src/operators/tensor/math/neg.cairo
@@ -1,9 +1,5 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
-use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::numbers::NumberTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 /// Cf: TensorTrait::neg docstring
 fn neg<
@@ -16,7 +12,7 @@ fn neg<
 >(
     mut z: Tensor<T>
 ) -> Tensor<T> {
-    let mut data_result = ArrayTrait::<T>::new();
+    let mut data_result: Array<T> = array![];
     loop {
         match z.data.pop_front() {
             Option::Some(item) => { data_result.append((*item).neg()); },
@@ -24,5 +20,5 @@ fn neg<
         };
     };
 
-    return TensorTrait::<T>::new(z.shape, data_result.span());
+    TensorTrait::<T>::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/not.cairo b/src/operators/tensor/math/not.cairo
index 93e25c525..37f9561ec 100644
--- a/src/operators/tensor/math/not.cairo
+++ b/src/operators/tensor/math/not.cairo
@@ -1,15 +1,10 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::operators::tensor::implementations::{tensor_bool::BoolTensor};
 
-
 // Cf TensorTrait::not docstring
 fn not(mut z: Tensor<bool>) -> Tensor<bool> {
-    let mut data_result = ArrayTrait::<bool>::new();
+    let mut data_result: Array<bool> = array![];
 
     loop {
         match z.data.pop_front() {
@@ -18,5 +13,5 @@ fn not(mut z: Tensor<bool>) -> Tensor<bool> {
         };
     };
 
-    return TensorTrait::new(z.shape, data_result.span());
+    TensorTrait::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/onehot.cairo b/src/operators/tensor/math/onehot.cairo
index bad9c9ef0..f5c48a9ef 100644
--- a/src/operators/tensor/math/onehot.cairo
+++ b/src/operators/tensor/math/onehot.cairo
@@ -1,18 +1,7 @@
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-
-use core::traits::Into;
-use core::debug::PrintTrait;
-use core::traits::TryInto;
-use core::serde::Serde;
-use core::traits::Destruct;
-use core::option::OptionTrait;
-
 use orion::numbers::NumberTrait;
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::operators::tensor::{TensorTrait, Tensor};
 
-
 /// Cf: TensorTrait::onehot docstring
 fn onehot_encode<
     T,
@@ -40,8 +29,8 @@ fn onehot_encode<
 
     assert(((axis == 999) | (axis.into() <= rank)), 'axis out of dimensions');
 
-    let mut output_data = ArrayTrait::new();
-    let mut output_size = ArrayTrait::<usize>::new();
+    let mut output_data = array![];
+    let mut output_size: Array<usize> = array![];
 
     // New shape for output data
     loop {
@@ -65,10 +54,7 @@ fn onehot_encode<
     }
 
     let mut inner_index = 0;
-    loop {
-        if inner_index == depth {
-            break ();
-        };
+    while inner_index != depth {
         let ind = FixedTrait::<
             T, MAG
         >::new_unscaled(inner_index.try_into().unwrap(), false);
@@ -87,7 +73,7 @@ fn onehot_encode<
     };
 
     let mut output_tensor = TensorTrait::new(output_size.span(), output_data.span());
-    let mut tranpose_axes = ArrayTrait::new();
+    let mut tranpose_axes = array![];
 
     // Get new shape if axis is not last dimension
     if (axis != 999) & (axis.into() != rank) {
         let mut index: usize = 0;
@@ -104,11 +90,10 @@ fn onehot_encode<
             index += 1;
         };
 
-
         output_tensor = output_tensor.transpose(tranpose_axes.span());
     }
 
-    return output_tensor;
+    output_tensor
 }
 
 fn onehot<
@@ -127,16 +112,17 @@ fn onehot<
 ) -> Tensor<T> {
     assert(values.len() == 2, 'Wrong values dimensions');
 
-    let mut sizes = ArrayTrait::new();
+    let mut sizes = array![];
     sizes.append(2);
 
     let mut first = *values.pop_front().unwrap();
     let mut second = *values.pop_front().unwrap();
 
-    let mut data = ArrayTrait::new();
+    let mut data = array![];
     data.append(FixedTrait::<T, MAG>::new_unscaled(first.try_into().unwrap(), false));
     data.append(FixedTrait::<T, MAG>::new_unscaled(second.try_into().unwrap(), false));
 
     let values = TensorTrait::new(sizes.span(), data.span());
+
     onehot_encode(self, depth, axis, values)
 }
diff --git a/src/operators/tensor/math/optional_get_element.cairo b/src/operators/tensor/math/optional_get_element.cairo
index 3af112f85..eef73bc17 100644
--- a/src/operators/tensor/math/optional_get_element.cairo
+++ b/src/operators/tensor/math/optional_get_element.cairo
@@ -1,9 +1,5 @@
-use core::array::ArrayTrait;
-use option::OptionTrait;
-use core::array::SpanTrait;
-
-use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::numbers::NumberTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 /// Cf: TensorTrait::optional_get_element docstring
 fn optional_get_element<
@@ -16,7 +12,7 @@ fn optional_get_element<
 >(
     mut z: Tensor<T>, index: usize
 ) -> Tensor<T> {
-    let mut data_result = ArrayTrait::<T>::new();
+    let mut data_result: Array<T> = array![];
 
     // use match to get the element within or outside the array bounds
     match z.data.get(index) {
@@ -24,5 +20,5 @@ fn optional_get_element<
         Option::None => {}
     };
 
-    return TensorTrait::<T>::new(z.shape, data_result.span());
+    TensorTrait::<T>::new(z.shape, data_result.span())
 }
diff --git a/src/operators/tensor/math/or.cairo b/src/operators/tensor/math/or.cairo
index 60d10c95a..13b4697a3 100644
--- a/src/operators/tensor/math/or.cairo
+++ b/src/operators/tensor/math/or.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{
@@ -20,12 +16,12 @@ fn or<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<usize> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<usize> = ArrayTrait::new();
+    let mut result: Array<usize> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -38,10 +34,7 @@ fn or<
         }
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/pow.cairo b/src/operators/tensor/math/pow.cairo
index a5b15579b..b02097551 100644
--- a/src/operators/tensor/math/pow.cairo
+++ b/src/operators/tensor/math/pow.cairo
@@ -1,7 +1,3 @@
-use core::array::ArrayTrait;
-use core::option::OptionTrait;
-use core::array::SpanTrait;
-
 use orion::numbers::NumberTrait;
 use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
 use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape};
@@ -18,12 +14,12 @@ fn pow<
     y: @Tensor<T>, z: @Tensor<T>
 ) -> Tensor<T> {
     let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
-    let mut result: Array<T> = ArrayTrait::new();
+    let mut result: Array<T> = array![];
 
     let num_elements = len_from_shape(broadcasted_shape);
 
     let mut n: usize = 0;
-    loop {
+    while n != num_elements {
         let indices_broadcasted = unravel_index(n, broadcasted_shape);
 
         let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
@@ -32,10 +28,7 @@ fn pow<
         result.append(NumberTrait::pow(*(*y.data)[indices_self], *(*z.data)[indices_other]));
 
         n += 1;
-        if n == num_elements {
-            break ();
-        };
     };
 
-    return TensorTrait::new(broadcasted_shape, result.span());
+    TensorTrait::new(broadcasted_shape, result.span())
 }
diff --git a/src/operators/tensor/math/random_uniform_like.cairo b/src/operators/tensor/math/random_uniform_like.cairo
index 0b9c06cda..61d993dda 100644
--- a/src/operators/tensor/math/random_uniform_like.cairo
+++ b/src/operators/tensor/math/random_uniform_like.cairo
@@ -1,17 +1,12 @@
-use core::traits::Into;
-use core::traits::TryInto;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
-use core::option::OptionTrait;
+use core::integer;
+
+use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
 
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
-
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
 use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
 
-use core::traits::PartialEq;
-use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
-use core::integer::{u128s_from_felt252, U128sFromFelt252Result};
-use core::traits;
 
 /// Cf: TensorTrait::random_uniform_like docstring
 fn random_uniform_like<
@@ -30,8 +25,9 @@ fn random_uniform_like<
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(tensor: Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>) -> Tensor<T> {
-
+>(
+    tensor: Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>
+) -> Tensor<T> {
     let mut seed: usize = match seed {
         Option::Some(seed) => seed,
         Option::None => NumberTrait::max_value(),
@@ -45,11 +41,10 @@ fn random_uniform_like<
         Option::None => NumberTrait::zero(),
     };
     assert!(high > low, "high must be larger than low");
-    let res = tensor_get_state(tensor,seed,high,low);
-
-    return res;
-}
+    let res = tensor_get_state(tensor, seed, high, low);
 
+    res
+}
 
 fn tensor_get_state<
     T,
@@ -67,15 +62,14 @@ fn tensor_get_state<
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(tensor: Tensor<T>, mut seed: usize, high: T, low: T) -> Tensor<T> {
-    let mut data = ArrayTrait::new();
+>(
+    tensor: Tensor<T>, mut seed: usize, high: T, low: T
+) -> Tensor<T> {
+    let mut data = array![];
 
     let mut count = (tensor.data).len();
     let mut i = 0;
 
-    loop {
-        if count == i {
-            break;
-        }
+    while count != i {
         let mut v = NumberTrait::one();
         v = hash_random_range(seed, low, high);
         let a: u64 = 1664525;
@@ -86,7 +80,8 @@ fn tensor_get_state<
         data.append(v);
         i += 1;
     };
-    return TensorTrait::new(tensor.shape, data.span());
+
+    TensorTrait::new(tensor.shape, data.span())
 }
 
 // High level random in a range
@@ -105,16 +100,19 @@ fn hash_random_range<
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(seed: usize, min: T, max: T) -> T {
+>(
+    seed: usize, min: T, max: T
+) -> T {
     let mut key = PedersenHasherImpl::new();
     let hash: felt252 = key.hash(seed.into(), 1);
     let a: u128 = 4294967295;
-    let b: u128 = match u128s_from_felt252(hash) {
-        U128sFromFelt252Result::Narrow(x) => x,
-        U128sFromFelt252Result::Wide((x, _)) => x,
+    let b: u128 = match integer::u128s_from_felt252(hash) {
+        integer::U128sFromFelt252Result::Narrow(x) => x,
+        integer::U128sFromFelt252Result::Wide((x, _)) => x,
     } % a;
     let c: felt252 = b.into();
     let rnd: T = NumberTrait::from_felt(c);
     let range = max - min + NumberTrait::one(); // + 1 to include max
+
     min + rnd % range
 }
diff --git a/src/operators/tensor/math/range.cairo b/src/operators/tensor/math/range.cairo
index a21f7f2b0..1edc0f628 100644
--- a/src/operators/tensor/math/range.cairo
+++ b/src/operators/tensor/math/range.cairo
@@ -1,15 +1,5 @@
-use core::traits::Into;
-use core::traits::TryInto;
-use orion::operators::tensor::core::{Tensor, TensorTrait};
-use core::array::{ArrayTrait, SpanTrait};
-use core::option::OptionTrait;
-
-use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
-
-use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
-use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
-
+use orion::operators::tensor::core::{Tensor, TensorTrait};
 
 fn range<
     T,
@@ -23,17 +13,18 @@
     impl TAddEq: AddEq<T>,
     impl TCopy: Copy<T>,
     impl TDrop: Drop<T>,
->(mut start: T, end: T, step: T) -> Tensor<T> {
-    let mut result: Array<T> = ArrayTrait::<T>::new();
+>(
+    mut start: T, end: T, step: T
+) -> Tensor<T> {
+    let mut result: Array<T> = array![];
     let zero: T = NumberTrait::zero();
 
-    loop {
-        if (step >= zero && start >= end) || (step <= zero && start <= end) {
-            break ();
-        };
+    while !(step >= zero && start >= end) && !(step <= zero && start <= end) {
         let v = start;
         result.append(v);
         start += step;
     };
+
     let shape = array![result.len()];
-    return TensorTrait::<T>::new(shape.span(), result.span());
+
+    TensorTrait::<T>::new(shape.span(), result.span())
 }
diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo
index 813101eb5..ba2be9215 100644
--- a/src/operators/tensor/math/reduce_l1.cairo
+++ b/src/operators/tensor/math/reduce_l1.cairo
@@ -1,11 +1,7 @@
-use core::option::OptionTrait;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::debug::PrintTrait;
-
 use orion::numbers::NumberTrait;
-use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 use orion::numbers::fixed_point::core::FixedTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
+
 /// Cf: TensorTrait::reduce_sum docstring
 fn reduce_l1<
     T,
@@ -19,5 +15,6 @@ fn reduce_l1<
     self: @Tensor<T>, axis: usize, keepdims: bool
 ) -> Tensor<T> {
     let data_abs = self.abs();
-    return data_abs.reduce_sum(axis: axis, keepdims: keepdims);
+
+    data_abs.reduce_sum(axis: axis, keepdims: keepdims)
 }
diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo
index 8bb5bc888..96f4b7245 100644
--- a/src/operators/tensor/math/reduce_l2.cairo
+++ b/src/operators/tensor/math/reduce_l2.cairo
@@ -1,11 +1,8 @@
-use core::option::OptionTrait;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
 use core::debug::PrintTrait;
 
 use orion::numbers::NumberTrait;
-use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 use orion::numbers::fixed_point::core::FixedTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 
 fn square<
     T,
@@ -19,7 +16,7 @@ fn square<
     self: @Tensor<T>
 ) -> Tensor<T> {
     let mut data = *self.data;
-    let mut output_data = ArrayTrait::new();
+    let mut output_data = array![];
 
     loop {
         match data.pop_front() {
@@ -32,8 +29,10 @@ fn square<
     };
 
     let tensor_square = TensorTrait::new(*self.shape, output_data.span());
-    return tensor_square;
+
+    tensor_square
 }
+
 /// Cf: TensorTrait::reduce_l2 docstring
 fn reduce_l2<
     T,
@@ -48,7 +47,8 @@ fn reduce_l2<
 ) -> Tensor<T> {
     let tensor_square = square(self);
     let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims);
-    return tensor_square_sum.sqrt();
+
+    tensor_square_sum.sqrt()
 }
 
 fn reduce_l2_complex<
@@ -64,9 +64,8 @@ fn reduce_l2_complex<
     self: @Tensor<T>, axis: usize, keepdims: bool
 ) -> Tensor<T> {
     let mut tensor_square = square(@self.abs());
-
     let mut tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims);
 
-    return tensor_square_sum.sqrt();
+    tensor_square_sum.sqrt()
 }
diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo
index 91019ebc0..60a5225cb 100644
--- a/src/operators/tensor/math/reduce_log_sum.cairo
+++ b/src/operators/tensor/math/reduce_log_sum.cairo
@@ -1,11 +1,6 @@
-use core::option::OptionTrait;
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
-use core::debug::PrintTrait;
-
 use orion::numbers::NumberTrait;
-use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 use orion::numbers::fixed_point::core::FixedTrait;
+use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
 
 /// Cf: TensorTrait::reduce_sum_square docstring
 fn reduce_log_sum<
@@ -23,5 +18,5 @@ fn reduce_log_sum<
     let tensor_square_sum = self.reduce_sum(axis: axis, keepdims: keepdims);
     let tensor_square_sum_log = tensor_square_sum.log();
 
-    return tensor_square_sum_log;
+    tensor_square_sum_log
 }
diff --git a/src/operators/tensor/math/reduce_mean.cairo b/src/operators/tensor/math/reduce_mean.cairo
index a692fdb91..44839e19e 100644
--- a/src/operators/tensor/math/reduce_mean.cairo
+++ b/src/operators/tensor/math/reduce_mean.cairo
@@ -1,10 +1,5 @@
-use core::option::OptionTrait;
-use core::traits::Div;
-use core::traits::TryInto;
-use core::traits::Into;
-
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
+use alexandria_sorting::bubble_sort;
+use alexandria_data_structures::array_ext::{SpanTraitExt};
 
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
@@ -13,10 +8,6 @@ use orion::operators::tensor::helpers::{
     reduce_output_shape, len_from_shape, combine_indices, get_all_axes
 };
 
-use alexandria_sorting::bubble_sort;
-use alexandria_data_structures::array_ext::{SpanTraitExt};
-
-
 /// Cf: TensorTrait::reduce_mean docstring
 fn reduce_mean<
     T,
@@ -35,7 +26,7 @@ fn reduce_mean<
 ) -> Tensor<T> {
     let noop_with_empty_axes = match noop_with_empty_axes {
         Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
-        Option::None => { false },
+        Option::None => false,
     };
     let axes = match axes {
         Option::Some(axes) => {
@@ -43,7 +34,7 @@ fn reduce_mean<
                 get_all_axes(*self.shape)
             } else {
                 assert(axes.len() == axes.unique().len(), 'duplicated axis.');
-                let mut axes_arr = ArrayTrait::new();
+                let mut axes_arr = array![];
                 let mut copy_axes = axes;
                 loop {
                     match copy_axes.pop_front() {
@@ -56,7 +47,7 @@ fn reduce_mean<
             }
         },
         Option::None => {
-            if (noop_with_empty_axes == true) {
+            if noop_with_empty_axes {
                 return *self;
             }
             get_all_axes(*self.shape)
@@ -64,7 +55,7 @@ fn reduce_mean<
     };
     let keepdims = match keepdims {
         Option::Some(keepdims) => keepdims,
-        Option::None => { true },
+        Option::None => true,
     };
 
     let mut axis_c = 0;
@@ -80,21 +71,19 @@ fn reduce_mean<
                     data = array![current_mean].span();
                     break ();
                 }
-                let mut temp_data = ArrayTrait::new();
+                let mut temp_data = array![];
                 let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                 let data_len = len_from_shape(temp_shape);
                 let mut index: usize = 0;
-                loop {
+                while index != data_len {
                     let indices = unravel_index(index, temp_shape);
                     let current_mean = accumulate_mean::<T>(data, shape, indices, *axis - axis_c);
                     temp_data.append(current_mean);
                     index += 1;
-                    if index == data_len {
-                        break ();
-                    };
                 };
+
                 shape = temp_shape;
                 data = temp_data.span();
                 axis_c += 1;
@@ -104,7 +93,7 @@ fn reduce_mean<
     };
 
     let mut axes_copy = axes;
-    if keepdims == true {
+    if keepdims {
         shape = *self.shape;
         loop {
             match axes_copy.pop_front() {
@@ -112,9 +101,10 @@
                 Option::None => { break; }
             };
         };
-        return TensorTrait::<T>::new(shape, data);
+
+        TensorTrait::<T>::new(shape, data)
     } else {
-        return TensorTrait::<T>::new(shape, data);
+        TensorTrait::<T>::new(shape, data)
     }
 }
 
@@ -149,11 +139,7 @@ fn accumulate_mean<
     let mut axis_indexu32 = 0;
 
     if (input_shape).len() > 1 {
-        loop {
-            if axis_indexu32 == axis_len {
-                break ();
-            }
-
+        while axis_indexu32 != axis_len {
             let input_indices = combine_indices(output_indices, axis_indexu32, axis);
             let input_index = ravel_index(input_shape, input_indices);
             let ele = *(input_data)[input_index];
@@ -174,5 +160,5 @@ fn accumulate_mean<
         };
     }
 
     // let axis_index: T = NumberTrait::<T, MAG>::new(axis_index.try_into().unwrap(), false);
-    return acc / axis_index;
+    acc / axis_index
 }
diff --git a/src/operators/tensor/math/reduce_min.cairo b/src/operators/tensor/math/reduce_min.cairo
index eb268c1f2..9fb0d1117 100644
--- a/src/operators/tensor/math/reduce_min.cairo
+++ b/src/operators/tensor/math/reduce_min.cairo
@@ -1,9 +1,5 @@
-use core::option::OptionTrait;
-use core::traits::TryInto;
-use core::traits::Into;
-
-use core::array::ArrayTrait;
-use core::array::SpanTrait;
+use alexandria_sorting::bubble_sort;
+use alexandria_data_structures::array_ext::{SpanTraitExt};
 
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::NumberTrait;
@@ -12,10 +8,6 @@ use orion::operators::tensor::helpers::{
     reduce_output_shape, len_from_shape, combine_indices, get_all_axes
 };
 
-use alexandria_sorting::bubble_sort;
-use alexandria_data_structures::array_ext::{SpanTraitExt};
-
-
 /// Cf: TensorTrait::reduce_min docstring
 fn reduce_min<
     T,
@@ -33,7 +25,7 @@ fn reduce_min<
 ) -> Tensor<T> {
     let noop_with_empty_axes = match noop_with_empty_axes {
         Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
-        Option::None => { false },
+        Option::None => false,
     };
     let axes = match axes {
         Option::Some(axes) => {
@@ -41,7 +33,7 @@ fn reduce_min<
                 get_all_axes(*self.shape)
             } else {
                 assert(axes.len() == axes.unique().len(), 'duplicated axis.');
-                let mut axes_arr = ArrayTrait::new();
+                let mut axes_arr: Array<usize> = array![];
                 let mut copy_axes = axes;
                 loop {
                     match copy_axes.pop_front() {
@@ -54,7 +46,7 @@ fn reduce_min<
             }
         },
         Option::None => {
-            if (noop_with_empty_axes == true) {
+            if noop_with_empty_axes {
                 return *self;
             }
             get_all_axes(*self.shape)
@@ -62,7 +54,7 @@ fn reduce_min<
     };
     let keepdims = match keepdims {
         Option::Some(keepdims) => keepdims,
-        Option::None => { true },
+        Option::None => true,
     };
 
     let mut axis_c = 0;
@@ -78,21 +70,19 @@ fn reduce_min<
                     data = array![current_min].span();
                     break ();
                 }
-                let mut temp_data = ArrayTrait::new();
+                let mut temp_data = array![];
                 let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                 let data_len = len_from_shape(temp_shape);
                 let mut index: usize = 0;
-                loop {
+                while index != data_len {
                     let indices = unravel_index(index, temp_shape);
                     let current_min = accumulate_min::<T>(data, shape, indices, *axis - axis_c);
                     temp_data.append(current_min);
                     index += 1;
-                    if index == data_len {
-                        break ();
-                    };
                 };
+
                 shape = temp_shape;
                 data = temp_data.span();
                 axis_c += 1;
@@ -102,7 +92,7 @@ fn reduce_min<
     };
 
     let mut axes_copy = axes;
-    if keepdims == true {
+    if keepdims {
         shape = *self.shape;
         loop {
             match axes_copy.pop_front() {
@@ -110,9 +100,10 @@
                 Option::None => { break; }
            };
        };
-        return TensorTrait::<T>::new(shape, data);
+
+        TensorTrait::<T>::new(shape, data)
     } else {
-        return TensorTrait::<T>::new(shape, data);
+        TensorTrait::<T>::new(shape, data)
     }
 }
 
@@ -145,11 +136,7 @@ fn accumulate_min<
     let mut axis_index = 0;
 
     if (input_shape).len() > 1 {
-        loop {
-            if axis_index == axis_len {
-                break ();
-            }
-
+        while axis_index != axis_len {
             let input_indices = combine_indices(output_indices, axis_index, axis);
             let input_index = ravel_index(input_shape, input_indices);
             let ele = *(input_data)[input_index];
@@ -169,5 +156,6 @@ fn accumulate_min<
         };
     };
     }
 
-    return min;
+
+    min
 }
diff --git a/src/operators/tensor/math/reduce_prod.cairo b/src/operators/tensor/math/reduce_prod.cairo
index cf66dec97..f5df6179d 100644
--- a/src/operators/tensor/math/reduce_prod.cairo
+++ b/src/operators/tensor/math/reduce_prod.cairo
b/src/operators/tensor/math/reduce_prod.cairo @@ -1,8 +1,3 @@ -use core::option::OptionTrait; -use core::traits::MulEq; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; @@ -64,14 +59,14 @@ fn reduce_prod< /// - ONNX: Open Neural Network Exchange: https://onnx.ai/ /// /// ``` - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); let current_prod = accumulate_production::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_prod); - let mut output_shape = ArrayTrait::new(); + let mut output_shape = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); @@ -80,7 +75,7 @@ fn reduce_prod< let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_sum = accumulate_production::< T @@ -89,16 +84,14 @@ fn reduce_prod< output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + + TensorTrait::::new(output_shape, output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index ab834136f..078345f4a 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,12 +1,7 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; - /// Cf: TensorTrait::reduce_sum docstring fn reduce_sum< T, @@ -19,14 +14,14 @@ fn reduce_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let mut output_data = ArrayTrait::new(); + let mut output_data: Array = array![]; if (*self.shape).len() == 1 { assert(axis == 0, 'axis out of dimensions'); let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); output_data.append(current_sum); - let mut output_shape = ArrayTrait::new(); + let mut output_shape: Array = array![]; output_shape.append(1); return TensorTrait::new(output_shape.span(), output_data.span()); @@ -35,23 +30,21 @@ fn reduce_sum< let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); let mut index: usize = 0; - loop { + while index != output_data_len { let output_indices = unravel_index(index, output_shape); let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); output_data.append(current_sum); index += 1; - if index == output_data_len { - break (); - }; }; if keepdims { let output_shape = reduce_output_shape(*self.shape, axis, true); - return TensorTrait::::new(output_shape, output_data.span()); + + TensorTrait::::new(output_shape, 
output_data.span()) } else { - return TensorTrait::::new(output_shape, output_data.span()); + TensorTrait::::new(output_shape, output_data.span()) } } } diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index 329b8fb4e..b8ad7df99 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -1,13 +1,7 @@ -use core::option::OptionTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; - fn square< T, MAG, @@ -20,7 +14,7 @@ fn square< self: @Tensor ) -> Tensor { let mut data = *self.data; - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; loop { match data.pop_front() { @@ -33,7 +27,8 @@ fn square< }; let tensor_square = TensorTrait::new(*self.shape, output_data.span()); - return tensor_square; + + tensor_square } /// Cf: TensorTrait::reduce_sum_square docstring @@ -51,5 +46,6 @@ fn reduce_sum_square< ) -> Tensor { let tensor_square = square(self); let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); - return tensor_square_sum; + + tensor_square_sum } diff --git a/src/operators/tensor/math/resize.cairo b/src/operators/tensor/math/resize.cairo index 961fc3853..ab0ef86f7 100644 --- a/src/operators/tensor/math/resize.cairo +++ b/src/operators/tensor/math/resize.cairo @@ -1,15 +1,10 @@ -use core::traits::TryInto; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; -use orion::numbers::NumberTrait; use alexandria_sorting::bubble_sort; + +use orion::numbers::NumberTrait; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use core::debug::PrintTrait; #[derive(Copy, Drop)] enum MODE { @@ -43,7 +38,6 @@ enum TRANSFORMATION_MODE { HALF_PIXEL_SYMMETRIC } - /// Cf: TensorTrait::resize docstring fn resize< T, @@ -91,7 +85,8 @@ fn resize< axes, cubic_coeff_a ); - return output; + + output } fn interpolate_nd< @@ -157,18 +152,17 @@ fn interpolate_nd< Option::Some(scale_factors) => { let mut new_scale_factors = ArrayTrait::::new(); let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break *scale_factors.at(i); } + i += 1; }; new_scale_factors.append(item); @@ -182,25 +176,25 @@ fn interpolate_nd< let mut output_size = match output_size { Option::Some(output_size) => { - let mut new_output_size = ArrayTrait::new(); + let mut new_output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break *output_size.at(i); } + i += 1; }; new_output_size.append(item); d += 1; }; + Option::Some(new_output_size.span()) }, Option::None => { Option::None }, @@ -208,75 +202,71 @@ fn interpolate_nd< let mut roi = match roi { Option::Some(roi) => { - let mut new_roi_data = ArrayTrait::new(); + let mut new_roi_data = array![]; let naxes = axes.len(); let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break 
NumberTrait::zero(); } + if *axes.at(i) == d { break *roi.data.at(i); } + i += 1; }; + new_roi_data.append(item); d += 1; }; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break *roi.data.at(i + naxes); } + i += 1; }; + new_roi_data.append(item); d += 1; }; + let mut shape = ArrayTrait::new(); shape.append(r * 2); Option::Some(TensorTrait::new(shape.span(), new_roi_data.span())) }, Option::None => { Option::None }, }; + (axes, scale_factors, output_size, roi) }, Option::None => { - let mut axes = ArrayTrait::new(); + let mut axes = array![]; let mut i = 0; - loop { - if i == r { - break; - } + while i != r { axes.append(i); i += 1; }; + (axes.span(), scale_factors, output_size, roi) } }; let (mut output_size, mut scale_factors) = match output_size { Option::Some(output_size) => { - let mut scale_factors = ArrayTrait::::new(); + let mut scale_factors: Array = array![]; let mut i = 0; - loop { - if i == r { - break; - } - + while i != r { let output_size_i: T = NumberTrait::new_unscaled( (*output_size.at(i)).into(), false ); @@ -293,47 +283,42 @@ fn interpolate_nd< KEEP_ASPECT_RATIO_POLICY::NOT_LARGER => { let mut scale = *scale_factors.at(*axes.at(0)); let mut i = 1; - loop { - if i == axes.len() { - break; - } + while i != axes.len() { if scale > *scale_factors.at(*axes.at(i)) { scale = *scale_factors.at(*axes.at(i)); } + i += 1; }; - let mut scale_factors = ArrayTrait::::new(); + let mut scale_factors: Array = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break scale; } + i += 1; }; scale_factors.append(item); d += 1; }; - let mut output_size = ArrayTrait::new(); + let mut output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break NumberTrait::round( scale @@ -344,56 +329,54 @@ fn interpolate_nd< .try_into() .unwrap(); } + i += 1; }; output_size.append(item); d += 1; }; + (output_size.span(), scale_factors.span()) }, KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER => { let mut scale = *scale_factors.at(*axes.at(0)); let mut i = 1; - loop { - if i == axes.len() { - break; - } + while i != axes.len() { if scale < *scale_factors.at(*axes.at(i)) { scale = *scale_factors.at(*axes.at(i)); } + i += 1; }; - let mut scale_factors = ArrayTrait::::new(); + + let mut scale_factors: Array = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break NumberTrait::one(); } + if *axes.at(i) == d { break scale; } + i += 1; }; scale_factors.append(item); d += 1; }; - let mut output_size = ArrayTrait::new(); + let mut output_size = array![]; let mut d = 0; - loop { - if d == r { - break; - } + while d != r { let mut i = 0; let item = loop { if i == axes.len() { break *(*data).shape.at(d); } + if *axes.at(i) == d { break NumberTrait::round( scale @@ -404,11 +387,13 @@ fn interpolate_nd< .try_into() .unwrap(); } + i += 1; }; output_size.append(item); d += 1; }; + (output_size.span(), scale_factors.span()) }, }; @@ -416,7 +401,7 @@ fn interpolate_nd< (output_size, scale_factors) }, Option::None => { - let mut output_size = ArrayTrait::::new(); + let mut output_size: Array = array![]; let scale_factors = match scale_factors { 
Option::Some(scale_factors) => scale_factors, @@ -424,54 +409,44 @@ fn interpolate_nd< }; let mut i = 0; - loop { - if i == scale_factors.len() { - break; - } - + while i != scale_factors.len() { let item = *scale_factors.at(i) * NumberTrait::new_unscaled((*(*(data).shape).at(i)).into(), false); output_size.append(item.try_into().unwrap()); i += 1; }; + (output_size.span(), scale_factors) }, }; - let mut ret = ArrayTrait::>::new(); + let mut ret: Array> = array![]; let mut i = 0; - loop { + while i != output_size.len() { let mut temp = ArrayTrait::::new(); - if i == output_size.len() { - break; - } let mut j = 0; - loop { - if j == *output_size.at(i) { - break; - } + while j != *output_size.at(i) { temp.append(j); j += 1; }; + ret.append(temp.span()); i += 1; }; let mut ret = cartesian(ret.span()); - let mut ret_data = ArrayTrait::new(); + let mut ret_data = array![]; loop { match ret.pop_front() { Option::Some(X) => { - let mut x = ArrayTrait::::new(); + let mut x: Array = array![]; let mut i = 0; - loop { - if i == X.len() { - break; - } + while i != X.len() { x.append(NumberTrait::new_unscaled((*X.at(i)).into(), false)); i += 1; }; + let mut x = x.span(); let item = interpolate_nd_with_x( data, @@ -495,9 +470,10 @@ fn interpolate_nd< } }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(ret_data.len()); - return TensorTrait::new(output_size, ret_data.span()); + + TensorTrait::new(output_size, ret_data.span()) } fn cartesian(mut arrays: Span>,) -> Array> { @@ -512,24 +488,18 @@ fn cartesian(mut arrays: Span>,) -> Array> { }; let mut i = 0; - let mut size_arrays = ArrayTrait::new(); - loop { - if i == arrays.len() { - break; - } + let mut size_arrays = array![]; + while i != arrays.len() { size_arrays.append((*(arrays.at(i))).len()); - i += 1; }; + let size_arrays = size_arrays.span(); - let mut output_arrays = ArrayTrait::>::new(); + let mut output_arrays = array![]; let mut m = n; let mut i = 0; - loop { - if i == arrays.len() { - break; - } + while i != arrays.len() { m = m / (*(arrays.at(i))).len(); let mut out = repeat(*(arrays.at(i)), m); out = repeat_2(out, size_arrays, i); @@ -537,75 +507,58 @@ fn cartesian(mut arrays: Span>,) -> Array> { output_arrays.append(out); i += 1; }; + let output_arrays = output_arrays.span(); let mut i = 0; - let mut ret = ArrayTrait::>::new(); - loop { - if i == n { - break; - } + let mut ret = array![]; + while i != n { let mut j = 0; - let mut x = ArrayTrait::new(); - loop { - if j == arrays.len() { - break; - } - + let mut x = array![]; + while j != arrays.len() { x.append(*(output_arrays.at(j)).at(i)); j += 1; }; + ret.append(x); i += 1; }; - return ret; + ret } - fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { let mut size = array.len(); let mut i = 0; - loop { - if i == index { - break; - } + while i != index { let mut j = 1; - loop { - if j == *size_array.at(index - 1 - i) { - break; - } + while j != *size_array.at(index - 1 - i) { let mut k = 0; - loop { - if k == size { - break; - } + while k != size { array.append(*array.at(k)); k += 1; }; + j += 1; }; + size = size * *size_array.at(index - 1 - i); i += 1; }; + array } fn repeat(array: Span, m: usize,) -> Array { - let mut out = ArrayTrait::new(); + let mut out = array![]; let mut j = 0; - loop { - if j == array.len() { - break; - } + while j != array.len() { let mut k = 0; - loop { - if k == m { - break; - } + while k != m { out.append(*array.at(j)); k += 1; }; + j += 1; }; @@ -659,7 +612,8 @@ fn interpolate_nd_with_x< cubic_coeff_a ); } - 
let mut res1d = ArrayTrait::new(); + + let mut res1d = array![]; let scale_factor_zero = match scale_factor.pop_front() { Option::Some(item) => { *item }, @@ -681,13 +635,11 @@ fn interpolate_nd_with_x< reduced_roi_shape.append(roi.data.len() - 2); let mut i = 1; - loop { - if i == 2 * n { - break; - } + while i != 2 * n { if i != n { reduced_roi.append(*roi.data.at(i)); } + i += 1; }; Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span())) @@ -696,10 +648,7 @@ fn interpolate_nd_with_x< }; let mut i = 0; - loop { - if i == *(*data).shape.at(0) { - break; - } + while i != *(*data).shape.at(0) { let data = get_row_n(data, i); let mut r = interpolate_nd_with_x( @@ -717,24 +666,26 @@ fn interpolate_nd_with_x< exclude_outside, cubic_coeff_a ); + loop { match r.data.pop_front() { Option::Some(item) => { res1d.append(*item); }, Option::None => { break; } } }; + i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(res1d.len()); let res1d = TensorTrait::new(shape.span(), res1d.span()); let reduced_roi = match roi { Option::Some(roi) => { - let mut reduced_roi = ArrayTrait::new(); - let mut reduced_roi_shape = ArrayTrait::new(); + let mut reduced_roi = array![]; + let mut reduced_roi_shape = array![]; reduced_roi_shape.append(2); reduced_roi.append(*roi.data.at(0)); @@ -760,45 +711,40 @@ fn interpolate_nd_with_x< cubic_coeff_a ); - //let mut ret = ArrayTrait::new(); - //let mut shape = ArrayTrait::new(); + //let mut ret = array![]; + //let mut shape = array![]; //shape.append(2); //ret.append(NumberTrait::zero()); - return a; + + a } fn get_row_n, +Copy, +Drop,>( data: @Tensor, index: usize, ) -> Tensor { - let mut output_data = ArrayTrait::new(); - let mut output_shape = ArrayTrait::new(); + let mut output_data = array![]; + let mut output_shape = array![]; let mut stride_output = 1; let mut i = 0; - loop { - if i == (*data).shape.len() { - break; - } + while i != (*data).shape.len() { if i != 0 { output_shape.append(*(*data).shape.at(i)); stride_output = stride_output * *(*data).shape.at(i); } + i += 1; }; let mut i = 0; - loop { - if i == stride_output { - break; - } + while i != stride_output { output_data.append(*(*data).data.at(index * stride_output + i)); i += 1; }; - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } - fn interpolate_1d_with_x< T, MAG, @@ -915,9 +861,7 @@ fn interpolate_1d_with_x< let mut coeffs = match mode { MODE::NEAREST => { let coeffs = match antialias { - Option::Some => core::panic_with_felt252( - 'antialias not for mode NEAREST' - ), + Option::Some => core::panic_with_felt252('antialias not for mode NEAREST'), Option::None => { nearest_coeffs(ratio, nearest_mode) }, }; coeffs @@ -938,9 +882,7 @@ fn interpolate_1d_with_x< }, MODE::CUBIC => { let coeffs = match antialias { - Option::Some => { - cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) - }, + Option::Some => { cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) }, Option::None => { cubic_coeffs(ratio, cubic_coeff_a) }, }; coeffs @@ -952,13 +894,10 @@ fn interpolate_1d_with_x< let (idxes, points) = get_neighbor(x_ori, n, data); if exclude_outside { - let mut coeffs_exclude_outside = ArrayTrait::::new(); + let mut coeffs_exclude_outside: Array = array![]; let mut sum = NumberTrait::zero(); let mut i = 0; - loop { - if i == idxes.data.len() { - break; - } + while i != idxes.data.len() { if *idxes.data.at(i) { coeffs_exclude_outside.append(NumberTrait::zero()); sum += 
NumberTrait::zero(); @@ -966,23 +905,22 @@ fn interpolate_1d_with_x< coeffs_exclude_outside.append(*coeffs.data.at(i)); sum += *coeffs.data.at(i); } + i += 1; }; - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs_exclude_outside.at(i) / sum); i += 1; }; + coeffs = TensorTrait::new(coeffs.shape, coeff_div.span()); } - return TensorTrait::matmul(@coeffs, @points); -} + TensorTrait::matmul(@coeffs, @points) +} fn get_neighbor< T, @@ -1009,29 +947,22 @@ fn get_neighbor< ) .try_into() .unwrap(); - let mut padded = ArrayTrait::new(); + let mut padded = array![]; let mut i = 0; - loop { - if i == pad_width { - break; - } + while i != pad_width { padded.append(*(*data).data.at(0)); i += 1; }; + let mut i = 0; - loop { - if i == (*data).data.len() { - break; - } + while i != (*data).data.len() { padded.append(*(*data).data.at(i)); i += 1; }; + let mut i = 0; - loop { - if i == pad_width { - break; - } + while i != pad_width { padded.append(*(*data).data.at((*data).data.len() - 1)); i += 1; }; @@ -1040,13 +971,10 @@ fn get_neighbor< let mut idxes = get_neighbor_idxes(x, n, padded.len()); - let mut idxes_centered = ArrayTrait::new(); - let mut ret = ArrayTrait::new(); + let mut idxes_centered = array![]; + let mut ret = array![]; let mut i = 0; - loop { - if i == idxes.data.len() { - break; - } + while i != idxes.data.len() { ret.append(*padded.at(*idxes.data.at(i))); if *idxes.data.at(i) >= pad_width { @@ -1058,16 +986,17 @@ fn get_neighbor< } else { idxes_centered.append(true); } + i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(idxes.data.len()); - return ( + ( TensorTrait::new(shape.span(), idxes_centered.span()), TensorTrait::new(shape.span(), ret.span()) - ); + ) } fn get_neighbor_idxes< @@ -1097,7 +1026,7 @@ fn get_neighbor_idxes< ) .try_into() .unwrap(); - let mut idxes = ArrayTrait::new(); + let mut idxes = array![]; if n % 2 == 0 { let (mut i_low, mut i_high) = if x < NumberTrait::zero() { @@ -1120,10 +1049,7 @@ fn get_neighbor_idxes< } let mut i = 0; - loop { - if i == n / 2 { - break; - } + while i != n / 2 { if i_low - i < 0 { idxes.append(i_high + i); i_high += 1; @@ -1136,6 +1062,7 @@ fn get_neighbor_idxes< } else { idxes.append(i_high + i); } + i += 1; } } else { @@ -1144,10 +1071,10 @@ fn get_neighbor_idxes< idxes = bubble_sort::bubble_sort_elements(idxes, true); - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(n); - return TensorTrait::new(shape.span(), idxes.span()); + TensorTrait::new(shape.span(), idxes.span()) } fn linear_coeffs< @@ -1163,14 +1090,14 @@ fn linear_coeffs< >( mut ratio: T ) -> Tensor { - let mut ret = ArrayTrait::new(); - let mut shape = ArrayTrait::new(); + let mut ret = array![]; + let mut shape = array![]; shape.append(2); ret.append(NumberTrait::one() - ratio); ret.append(ratio); - return TensorTrait::new(shape.span(), ret.span()); -} + TensorTrait::new(shape.span(), ret.span()) +} fn linear_coeffs_antialias< T, @@ -1197,15 +1124,12 @@ fn linear_coeffs_antialias< let footprint = (NumberTrait::one() + NumberTrait::one()) - (NumberTrait::one() + NumberTrait::one()) * start; - let mut coeffs = ArrayTrait::::new(); + let mut coeffs: Array = array![]; let mut sum = NumberTrait::zero(); // arange and clip + compute sum let mut i = start; - loop { - if i == start + footprint { - break; - } + while i != start + footprint { let value = NumberTrait::one() - NumberTrait::abs((i - 
ratio) * scale); if value < NumberTrait::zero() { coeffs.append(NumberTrait::zero()); } else if value > NumberTrait::one() { coeffs.append(NumberTrait::one()); sum += NumberTrait::one(); } else { coeffs.append(value); sum += value; } + i += NumberTrait::one(); }; let n = coeffs.len(); - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs.at(i) / sum); i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(n); - return TensorTrait::new(shape.span(), coeff_div.span()); + TensorTrait::new(shape.span(), coeff_div.span()) } fn cubic_coeffs< @@ -1267,8 +1189,8 @@ fn cubic_coeffs< Option::None => { NumberTrait::neg(three / four) }, }; - let mut coeffs = ArrayTrait::new(); - let mut shape = ArrayTrait::new(); + let mut coeffs = array![]; + let mut shape = array![]; coeffs .append( @@ -1284,7 +1206,8 @@ fn cubic_coeffs< ); shape.append(4); - return TensorTrait::new(shape.span(), coeffs.span()); + + TensorTrait::new(shape.span(), coeffs.span()) } fn cubic_coeffs_antialias< @@ -1322,14 +1245,11 @@ fn cubic_coeffs_antialias< Option::None => { NumberTrait::neg(three / four) }, }; - let mut coeffs = ArrayTrait::new(); + let mut coeffs = array![]; let mut sum = NumberTrait::zero(); let mut i = i_start; - loop { - if i == i_end { - break; - } + while i != i_end { let value = compute_coeff(scale * (i - ratio), A); coeffs.append(value); sum += value; @@ -1339,20 +1259,17 @@ fn cubic_coeffs_antialias< let n = coeffs.len(); - let mut coeff_div = ArrayTrait::::new(); + let mut coeff_div: Array = array![]; let mut i = 0; - loop { - if i == n { - break; - } + while i != n { coeff_div.append(*coeffs.at(i) / sum); i += 1; }; - let mut shape = ArrayTrait::new(); + let mut shape = array![]; shape.append(n); - return TensorTrait::new(shape.span(), coeff_div.span()); + TensorTrait::new(shape.span(), coeff_div.span()) } fn compute_coeff< @@ -1388,9 +1305,9 @@ fn compute_coeff< if x < two { return A * x_3 - five * A * x_2 + eigth * A * x - four * A; } - return NumberTrait::zero(); -} + NumberTrait::zero() +} fn nearest_coeffs< @@ -1414,8 +1331,8 @@ fn nearest_coeffs< Option::None => { NEAREST_MODE::ROUND_PREFER_FLOOR }, }; - let mut ret = ArrayTrait::new(); - let mut shape = ArrayTrait::new(); + let mut ret = array![]; + let mut shape = array![]; shape.append(2); // CHECK whether this condition is general enough diff --git a/src/operators/tensor/math/round.cairo b/src/operators/tensor/math/round.cairo index 5515dad9b..fc1680f16 100644 --- a/src/operators/tensor/math/round.cairo +++ b/src/operators/tensor/math/round.cairo @@ -1,12 +1,6 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn round< T, MAG, @@ -17,7 +11,7 @@ fn round< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { diff --git a/src/operators/tensor/math/scatter.cairo b/src/operators/tensor/math/scatter.cairo index a108ae4e2..fe3d9ffce 100644 --- a/src/operators/tensor/math/scatter.cairo +++ b/src/operators/tensor/math/scatter.cairo @@ -1,18 +1,10 @@ use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; - -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; -use core::option::OptionTrait; use 
orion::numbers::NumberTrait; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; use core::dict::Felt252DictTrait; use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; + /// Cf: TensorTrait::scatter docstring fn scatter< T, @@ -57,7 +49,7 @@ fn scatter< 'shape must be same' ); - let mut output_data = ArrayTrait::new(); + let mut output_data = array![]; let mut data_indices = indices.data; let mut data_updates = updates.data; let mut indices_updates: Felt252Dict = Default::default(); @@ -68,25 +60,20 @@ fn scatter< *data_shape_copy.pop_front().unwrap(); *indices_shape_copy.pop_front().unwrap(); - let mut indices_loop: usize = 1; let mut data_loop: usize = 1; if (axis == 0) { loop { match indices_shape_copy.pop_front() { - Option::Some(val) => { - indices_loop *= *val; - }, + Option::Some(val) => { indices_loop *= *val; }, Option::None => { break; } }; }; loop { match data_shape_copy.pop_front() { - Option::Some(val) => { - data_loop *= *val; - }, + Option::Some(val) => { data_loop *= *val; }, Option::None => { break; } }; }; @@ -140,7 +127,7 @@ fn scatter< if (reduction == 'none') { indices_updates.insert(result.into(), value.into()); } else { - let mut arr = ArrayTrait::new(); + let mut arr = array![]; let val = indices_updates_reduction.get(result.into()); let mut a = ArrayTrait::new(); @@ -155,10 +142,12 @@ fn scatter< Option::None => { break; } }; }; + arr.append(total_count); indices_updates_reduction .insert(result.into(), nullable_from_box(BoxTrait::new(arr.span()))); } + total_count += 1; }, Option::None => { break; } @@ -180,7 +169,7 @@ fn scatter< } } else { let value = indices_updates_reduction.get(i.into()); - let mut a = ArrayTrait::new(); + let mut a = array![]; let mut span = match match_nullable(value) { FromNullableResult::Null(()) => a.span(), FromNullableResult::NotNull(value) => value.unbox(), @@ -199,6 +188,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -209,6 +199,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -224,6 +215,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } @@ -239,6 +231,7 @@ fn scatter< Option::None => { break; } }; }; + output_data.append(result); } } @@ -252,9 +245,9 @@ fn scatter< let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); - if (transpose == true) { + if transpose { output_tensor = output_tensor.transpose(axes: array![0, 2, 1].span()) } - return output_tensor; + output_tensor } diff --git a/src/operators/tensor/math/scatter_nd.cairo b/src/operators/tensor/math/scatter_nd.cairo index 61535f618..73acb98ad 100644 --- a/src/operators/tensor/math/scatter_nd.cairo +++ b/src/operators/tensor/math/scatter_nd.cairo @@ -1,18 +1,10 @@ -use alexandria_data_structures::array_ext::SpanTraitExt; -use core::array::ArrayTrait; -use core::array::SpanTrait; +use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; -use core::traits::Into; -use core::debug::PrintTrait; -use core::traits::TryInto; -use core::serde::Serde; -use core::traits::Destruct; -use core::option::OptionTrait; +use alexandria_data_structures::array_ext::SpanTraitExt; use orion::numbers::NumberTrait; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -use core::dict::Felt252DictTrait; -use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; + /// Cf: TensorTrait::scatter_nd docstring fn scatter_nd< T, @@ -24,12 +16,8 @@ fn scatter_nd< impl TPartialOrd: PartialOrd, impl 
TPartialEq: PartialEq, >( - self: @Tensor, - updates: Tensor, - indices: Tensor, - reduction: Option + self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option ) -> Tensor { - let reduction = match reduction { Option::Some(val) => val, Option::None => 'none' @@ -44,16 +32,16 @@ fn scatter_nd< assert(*indices_last_axis <= data_rank, 'must be <= data rank'); let ind_max = indices.data.max().unwrap(); - if (data_rank > 1){ + if (data_rank > 1) { assert(ind_max < data_rank, 'index is out of bound'); } - let mut batch_dims_shape = ArrayTrait::new(); + let mut batch_dims_shape = array![]; let mut ind: usize = 0; loop { match indices_shape.pop_front() { - Option::Some(val) => { batch_dims_shape.append(*val);}, + Option::Some(val) => { batch_dims_shape.append(*val); }, Option::None => { break; } }; }; @@ -61,11 +49,11 @@ fn scatter_nd< let mut data_shape_clone = data_shape.clone(); loop { match data_shape_clone.pop_front() { - Option::Some(val) => { + Option::Some(val) => { if (ind >= *indices_last_axis) { batch_dims_shape.append(*val); - } - }, + } + }, Option::None => { break; } }; }; @@ -73,10 +61,8 @@ fn scatter_nd< let mut ind: usize = 0; loop { match batch_dims_shape.pop_front() { - Option::Some(val) => { - assert(val == *updates_shape[ind], 'must be same'); - }, - Option::None => { break; } + Option::Some(val) => { assert(val == *updates_shape[ind], 'must be same'); }, + Option::None => { break; } }; }; @@ -89,7 +75,7 @@ fn scatter_nd< if data_rank >= 1 { loop { match data_shape_clone.pop_front() { - Option::Some(val) => { indexer *= *val;}, + Option::Some(val) => { indexer *= *val; }, Option::None => { break; } }; } @@ -99,7 +85,7 @@ fn scatter_nd< let mut dict_ind: usize = 1; loop { match data_indices.pop_front() { - Option::Some(val) => { + Option::Some(val) => { updates_index_dict.insert((*val).into(), dict_ind); dict_ind += 1; }, @@ -107,68 +93,62 @@ fn scatter_nd< }; }; - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let mut data = *self.data; let mut index: usize = 0; let mut inner_index: usize = 0; let num = *data_shape_first.unwrap(); - loop { - if (index == num){ - break; - } + while index != num { let comp_index = updates_index_dict.get(index.into()); - if (comp_index == 0) { + if comp_index == 0 { loop { - if (inner_index == indexer) { + if (inner_index == indexer) { inner_index = 0; - break; + break; } let val = *data.at((index * indexer) + inner_index); output_data.append(val); inner_index += 1; }; - } - else { + } else { loop { - if (inner_index == indexer) { + if (inner_index == indexer) { inner_index = 0; - break; + break; } - if (reduction == 'none'){ - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + if (reduction == 'none') { + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); output_data.append(*val); } if (reduction == 'add') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); output_data.append(*val + data_val); } if (reduction == 'mul') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); output_data.append((*val) * data_val); } if (reduction == 'max') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) 
+ inner_index); let data_val = *data.at((index * indexer) + inner_index); if (*val > data_val) { output_data.append(*val); - } - else { + } else { output_data.append(data_val); } - } + } if (reduction == 'min') { - let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let val = data_updates.at(((comp_index - 1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); if (*val > data_val) { output_data.append(data_val); - } - else { + } else { output_data.append(*val); } - } + } inner_index += 1; } } @@ -176,6 +156,6 @@ fn scatter_nd< }; let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); - return output_tensor; - -} \ No newline at end of file + + output_tensor +} diff --git a/src/operators/tensor/math/shrink.cairo b/src/operators/tensor/math/shrink.cairo index 20ed4041f..c6d6f0409 100644 --- a/src/operators/tensor/math/shrink.cairo +++ b/src/operators/tensor/math/shrink.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -29,7 +25,7 @@ fn shrink< NumberTrait::half() }; - let mut data_result = ArrayTrait::::new(); + let mut data_result: Array = array![]; loop { match self.data.pop_front() { @@ -48,5 +44,5 @@ fn shrink< }; }; - return TensorTrait::new(self.shape, data_result.span()); + TensorTrait::new(self.shape, data_result.span()) } diff --git a/src/operators/tensor/math/sign.cairo b/src/operators/tensor/math/sign.cairo index 557a96995..afd97ef41 100644 --- a/src/operators/tensor/math/sign.cairo +++ b/src/operators/tensor/math/sign.cairo @@ -1,12 +1,6 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn sign< T, MAG, @@ -17,7 +11,7 @@ fn sign< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -26,5 +20,5 @@ fn sign< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sin.cairo b/src/operators/tensor/math/sin.cairo index 91e5d9949..46f87d5e6 100644 --- a/src/operators/tensor/math/sin.cairo +++ b/src/operators/tensor/math/sin.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::sin docstring fn sin< T, @@ -19,7 +13,7 @@ fn sin< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,6 +22,6 @@ fn sin< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sinh.cairo b/src/operators/tensor/math/sinh.cairo index 72caffd21..ca09e9f68 100644 --- a/src/operators/tensor/math/sinh.cairo +++ b/src/operators/tensor/math/sinh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, 
TensorTrait}; - /// Cf: TensorTrait::sinh docstring fn sinh< T, @@ -19,8 +13,7 @@ fn sinh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); - + let mut result: Array = array![]; loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sinh()); }, @@ -28,5 +21,5 @@ fn sinh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/sqrt.cairo b/src/operators/tensor/math/sqrt.cairo index 22ca78d77..c3a0d4e6f 100644 --- a/src/operators/tensor/math/sqrt.cairo +++ b/src/operators/tensor/math/sqrt.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - fn sqrt< T, MAG, @@ -18,7 +12,7 @@ fn sqrt< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -27,5 +21,5 @@ fn sqrt< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/tanh.cairo b/src/operators/tensor/math/tanh.cairo index 681f4d8f6..9759cd9bd 100644 --- a/src/operators/tensor/math/tanh.cairo +++ b/src/operators/tensor/math/tanh.cairo @@ -1,13 +1,7 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::Into; - use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; - /// Cf: TensorTrait::tanh docstring fn tanh< T, @@ -19,7 +13,7 @@ fn tanh< >( mut self: Tensor ) -> Tensor { - let mut result = ArrayTrait::new(); + let mut result: Array = array![]; loop { match self.data.pop_front() { @@ -28,5 +22,5 @@ fn tanh< }; }; - return TensorTrait::new(self.shape, result.span()); + TensorTrait::new(self.shape, result.span()) } diff --git a/src/operators/tensor/math/where.cairo b/src/operators/tensor/math/where.cairo index ba71c1279..f09883a41 100644 --- a/src/operators/tensor/math/where.cairo +++ b/src/operators/tensor/math/where.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -21,12 +17,12 @@ fn where< ) -> Tensor { let xy_shape = broadcast_shape(*x.shape, *y.shape); let broadcasted_shape = broadcast_shape(*self.shape, xy_shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_cond = broadcast_index_mapping(*self.shape, indices_broadcasted); @@ -39,10 +35,7 @@ fn where< result.append(res); n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/math/xor.cairo b/src/operators/tensor/math/xor.cairo index 467fd1bfd..7ed06eba5 100644 --- a/src/operators/tensor/math/xor.cairo +++ b/src/operators/tensor/math/xor.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; 
-use core::array::SpanTrait; - use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ @@ -20,12 +16,12 @@ fn xor< y: @Tensor, z: @Tensor ) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = ArrayTrait::new(); + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); let mut n: usize = 0; - loop { + while n != num_elements { let indices_broadcasted = unravel_index(n, broadcasted_shape); let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); @@ -38,10 +34,7 @@ fn xor< } n += 1; - if n == num_elements { - break (); - }; }; - return TensorTrait::new(broadcasted_shape, result.span()); + TensorTrait::new(broadcasted_shape, result.span()) } diff --git a/src/operators/tensor/ml/array_feature_extractor.cairo b/src/operators/tensor/ml/array_feature_extractor.cairo index 8605c00ab..efe8f099e 100644 --- a/src/operators/tensor/ml/array_feature_extractor.cairo +++ b/src/operators/tensor/ml/array_feature_extractor.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::option::OptionTrait; -use core::array::SpanTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; @@ -21,14 +17,14 @@ fn array_feature_extractor< let output_data = calculate_output_data::(self, indices, total_elements); - return TensorTrait::new(output_shape.span(), output_data.span()); + TensorTrait::new(output_shape.span(), output_data.span()) } fn process_1D_tensor, impl TCopy: Copy, impl TDrop: Drop>( self: Tensor, indices: Tensor ) -> Tensor { - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let mut indices_values: Span = indices.data; let self_len = *self.shape.at(0); @@ -43,7 +39,7 @@ fn process_1D_tensor, impl TCopy: Copy, impl }; }; - return TensorTrait::new(indices.shape, output_data.span()); + TensorTrait::new(indices.shape, output_data.span()) } @@ -53,7 +49,7 @@ fn calculate_output_shape< input_shape: Span, indices: Tensor ) -> (Array, usize) { let mut total_elements: usize = 1; - let mut output_shape: Array = ArrayTrait::new(); + let mut output_shape: Array = array![]; let mut input_shape_copy = input_shape; let mut input_shape_counter: usize = 0; @@ -75,7 +71,7 @@ fn calculate_output_shape< output_shape.append(indices.data.len()); - return (output_shape, total_elements); + (output_shape, total_elements) } @@ -84,18 +80,14 @@ fn calculate_output_data, impl TCopy: Copy, i ) -> Array { let last_tensor_axis: usize = *self.shape.at(self.shape.len() - 1); - let mut output_data = ArrayTrait::::new(); + let mut output_data: Array = array![]; let strides: Span = TensorTrait::stride(@self); let mut element_counter: usize = 0; let mut stride_l2 = *strides.at(strides.len() - 2); let mut stride_l1 = *strides.at(strides.len() - 1); - loop { - if element_counter > total_elements - 1 { - break; - } - + while element_counter != total_elements { let mut base_index = if strides.len() > 1 { element_counter * stride_l2 } else { @@ -119,5 +111,5 @@ fn calculate_output_data, impl TCopy: Copy, i element_counter += 1; }; - return output_data; + output_data } diff --git a/src/operators/tensor/quantization/dequantize_linear.cairo b/src/operators/tensor/quantization/dequantize_linear.cairo index b17c4a2d3..d9fef16a7 100644 --- a/src/operators/tensor/quantization/dequantize_linear.cairo +++ b/src/operators/tensor/quantization/dequantize_linear.cairo @@ -1,8 +1,3 @@ -use 
core::traits::Into; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::check_compatibility; use orion::utils::saturate; @@ -31,6 +26,7 @@ fn dequantize_linear< check_compatibility(*x.shape, *x_scale.shape); check_compatibility(*x.shape, *x_zero_point.shape); check_compatibility(*x_scale.shape, *x_zero_point.shape); + dequantize_per_axis(@(*x).into(), x_scale, x_zero_point) } } @@ -45,7 +41,6 @@ fn dequantize_per_axis< >( x: @Tensor, x_scale: @Tensor, x_zero_point: @Tensor ) -> Tensor:: { - (*x - *x_zero_point) * *x_scale } @@ -63,7 +58,7 @@ fn dequantize_element_wise< >( mut x: Tensor::, x_scale: T, x_zero_point: T ) -> Tensor:: { - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match x.data.pop_front() { @@ -75,7 +70,7 @@ fn dequantize_element_wise< }; }; - return TensorTrait::new(x.shape, result_data.span()); + TensorTrait::new(x.shape, result_data.span()) } fn dequantize< diff --git a/src/operators/tensor/quantization/dynamic_quantize_linear.cairo b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo index 085132e92..db8be469d 100644 --- a/src/operators/tensor/quantization/dynamic_quantize_linear.cairo +++ b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -42,37 +38,36 @@ fn dynamic_quantize_linear< let y_scale_value: T = (x_max - x_min) / (max - min); if x_max == x_min { y_scale_values.append(one); - }else{ + } else { y_scale_values.append(y_scale_value); } - - - let mut y_scale_tensor_shape = ArrayTrait::new(); + + let mut y_scale_tensor_shape: Array = array![]; y_scale_tensor_shape.append(y_scale_values.len()); - let y_scale = TensorTrait::::new( - shape: y_scale_tensor_shape.span(), data: y_scale_values.span(), - ); + let y_scale = TensorTrait::< + T + >::new(shape: y_scale_tensor_shape.span(), data: y_scale_values.span(),); // intermediate_zero_point = qmin - min(x)/y_scale let intermediate_zero_point: T = min - x_min / y_scale_value; // y_zero_point = cast(round(saturate(intermediate_zero_point))) let mut y_zero_point_value: T = saturate(min, max, intermediate_zero_point); - let mut y_zero_point_values = ArrayTrait::new(); + let mut y_zero_point_values: Array = array![]; y_zero_point_values.append(y_zero_point_value); - let mut y_zero_point_tensor_shape = ArrayTrait::new(); + let mut y_zero_point_tensor_shape: Array = array![]; y_zero_point_tensor_shape.append(y_zero_point_values.len()); - let mut y_zero_point_values = ArrayTrait::new(); + let mut y_zero_point_values: Array = array![]; y_zero_point_values.append(y_zero_point_value); - let mut y_zero_point = TensorTrait::::new( - shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(), - ); + let mut y_zero_point = TensorTrait::< + T + >::new(shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(),); // y_zero_point = y_zero_point.round(); // tensor only supported! 
// y = saturate (round (x / y_scale) + y_zero_point) - return (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point); -} \ No newline at end of file + (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point) +} diff --git a/src/operators/tensor/quantization/qlinear_add.cairo b/src/operators/tensor/quantization/qlinear_add.cairo index 3c69564c2..20673c1c7 100644 --- a/src/operators/tensor/quantization/qlinear_add.cairo +++ b/src/operators/tensor/quantization/qlinear_add.cairo @@ -1,13 +1,8 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; use orion::operators::tensor::{TensorTrait, Tensor}; - fn qlinear_add< T, MAG, @@ -49,5 +44,5 @@ fn qlinear_add< let mut x = (dequantized_a + dequantized_b).into(); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + quantize_linear(@x, y_scale, y_zero_point, min, max) } diff --git a/src/operators/tensor/quantization/qlinear_concat.cairo b/src/operators/tensor/quantization/qlinear_concat.cairo index 7d6280202..43e11bbdb 100644 --- a/src/operators/tensor/quantization/qlinear_concat.cairo +++ b/src/operators/tensor/quantization/qlinear_concat.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -50,7 +46,7 @@ fn qlinear_concat< //let mut x = TensorTrait::concat(tensors: array![dequantized_a, dequantized_b].span(), axis: axis); let mut x = concat_dequantize(tensors, scales, zero_points, axis, min, max); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + quantize_linear(@x, y_scale, y_zero_point, min, max) } @@ -125,7 +121,7 @@ fn dequantize_tensors< min: T, max: T ) -> Span> { - let mut array = ArrayTrait::>::new(); + let mut array: Array> = array![]; let mut i = 0; loop { match tensors.pop_front() { @@ -135,9 +131,11 @@ fn dequantize_tensors< }, Option::None => { break; } }; + i += 1; }; - return array.span(); + + array.span() } /// # tensor.concat /// diff --git a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo index 4fc0db823..bfa9346ff 100644 --- a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo +++ b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -36,7 +32,7 @@ fn qlinear_leakyrelu< ) -> Tensor { let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point); - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match dequantized_a.data.pop_front() { Option::Some(elem) => { @@ -50,7 +46,7 @@ fn qlinear_leakyrelu< }; }; - return quantize_linear( + quantize_linear( @TensorTrait::new(dequantized_a.shape, result_data.span()), a_scale, a_zero_point, min, max - ); + ) } diff --git a/src/operators/tensor/quantization/qlinear_matmul.cairo 
b/src/operators/tensor/quantization/qlinear_matmul.cairo index 03e542945..325f4fd30 100644 --- a/src/operators/tensor/quantization/qlinear_matmul.cairo +++ b/src/operators/tensor/quantization/qlinear_matmul.cairo @@ -1,13 +1,8 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; use orion::operators::tensor::{TensorTrait, Tensor}; - /// Cf: TensorTrait::qlinear_matmul docstring fn qlinear_matmul< T, @@ -65,8 +60,8 @@ fn qlinear_matmul< assert(a_ndim == b_ndim, 'dim mismatch'); let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point); let mut dequantized_b = dequantize_linear(@(*b), b_scale, b_zero_point); - let mut x_shape = ArrayTrait::::new(); - let mut x_data = ArrayTrait::::new(); + let mut x_shape: Array = array![]; + let mut x_data: Array = array![]; assert(a_shape[a_ndim - 1] == b_shape[b_ndim - 2], 'incompatible dim for matmul'); @@ -74,20 +69,16 @@ fn qlinear_matmul< let k = *a_shape[a_ndim - 1]; let n = *b_shape[b_ndim - 1]; - let mut a_shape_reduced = ArrayTrait::::new(); + let mut a_shape_reduced: Array = array![]; a_shape_reduced.append(m); a_shape_reduced.append(k); - let mut b_shape_reduced = ArrayTrait::::new(); + let mut b_shape_reduced: Array = array![]; b_shape_reduced.append(k); b_shape_reduced.append(n); let mut i = 0; - loop { - if i == stride(a_shape) / (m * k) { - break; - }; - + while i != stride(a_shape) / (m * k) { result_updates( @subtensor(@dequantized_a, i * (m * k), a_shape_reduced.span()), @subtensor(@dequantized_b, i * (k * n), b_shape_reduced.span()), @@ -95,21 +86,21 @@ fn qlinear_matmul< ); i += 1; }; + x_shape(ref x_shape, a_shape, m, n); let x = TensorTrait::new(x_shape.span(), x_data.span()); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + + quantize_linear(@x, y_scale, y_zero_point, min, max) } fn x_shape(ref x_data: Array, mut shape: Span, m: usize, n: usize) { - loop { - if shape.len() == 2 { - break; - } + while shape.len() != 2 { match shape.pop_front() { Option::Some(elem) => { x_data.append(*elem); }, Option::None => { break; } }; }; + x_data.append(m); x_data.append(n); } @@ -125,7 +116,8 @@ fn stride(mut shape: Span) -> usize { Option::None => { break; } }; }; - return accumulated; + + accumulated } fn subtensor, impl TCopy: Copy, impl TDrop: Drop>( @@ -135,14 +127,12 @@ fn subtensor, impl TCopy: Copy, impl TDrop: D let mut stride = stride(shape); let mut i = 0; - loop { - if i == stride { - break; - } + while i != stride { data.append(*x.data[start + i]); i += 1; }; - return TensorTrait::new(shape, data.span()); + + TensorTrait::new(shape, data.span()) } @@ -165,29 +155,17 @@ fn result_updates< let mat1 = *mat1.data; let mat2 = *mat2.data; - let mut result_shape = ArrayTrait::new(); + let mut result_shape: Array = array![]; result_shape.append(m); result_shape.append(p); let mut i = 0_usize; - loop { - if i == m { - break (); - } - + while i != m { let mut j = 0_usize; - loop { - if j == p { - break (); - } - + while j != p { let mut sum: T = NumberTrait::zero(); let mut k = 0_usize; - loop { - if k == n { - break (); - } - + while k != n { let mat1_index = i * n + k; let mat2_index = k * p + j; sum += *mat1[mat1_index] * *mat2[mat2_index]; diff --git a/src/operators/tensor/quantization/qlinear_mul.cairo b/src/operators/tensor/quantization/qlinear_mul.cairo index 
4c952c3f9..4a243b1d5 100644 --- a/src/operators/tensor/quantization/qlinear_mul.cairo +++ b/src/operators/tensor/quantization/qlinear_mul.cairo @@ -1,7 +1,3 @@ -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; - use orion::numbers::{NumberTrait}; use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; use orion::operators::tensor::quantization::quantize_linear::quantize_linear; @@ -49,6 +45,6 @@ fn qlinear_mul< let mut x = (dequantized_a * dequantized_b).into(); - return quantize_linear(@x, y_scale, y_zero_point, min, max); + quantize_linear(@x, y_scale, y_zero_point, min, max) } diff --git a/src/operators/tensor/quantization/quantize_linear.cairo b/src/operators/tensor/quantization/quantize_linear.cairo index 90633516a..58466e563 100644 --- a/src/operators/tensor/quantization/quantize_linear.cairo +++ b/src/operators/tensor/quantization/quantize_linear.cairo @@ -1,9 +1,3 @@ -use core::debug::PrintTrait; -use core::array::ArrayTrait; -use core::array::SpanTrait; -use core::option::OptionTrait; -use core::traits::TryInto; - use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::helpers::check_compatibility; use orion::operators::tensor::math::arithmetic::saturated_add; @@ -33,6 +27,7 @@ fn quantize_linear< check_compatibility(*x.shape, *y_scale.shape); check_compatibility(*x.shape, *y_zero_point.shape); check_compatibility(*y_scale.shape, *y_zero_point.shape); + quantize_per_axis(x, y_scale, y_zero_point, min, max) } } @@ -70,7 +65,7 @@ fn quantize_element_wise< >( mut x: Tensor::, y_scale: T, y_zero_point: T, min: T, max: T ) -> Tensor:: { - let mut result_data = ArrayTrait::::new(); + let mut result_data: Array = array![]; loop { match x.data.pop_front() { @@ -82,7 +77,7 @@ fn quantize_element_wise< }; }; - return TensorTrait::new(x.shape, result_data.span()); + TensorTrait::new(x.shape, result_data.span()) } fn quantize< diff --git a/tests/nodes/random_uniform_like_fp16x16.cairo b/tests/nodes/random_uniform_like_fp16x16.cairo index 951a567b8..cc8fa60a9 100644 --- a/tests/nodes/random_uniform_like_fp16x16.cairo +++ b/tests/nodes/random_uniform_like_fp16x16.cairo @@ -15,7 +15,12 @@ fn test_random_uniform_like_fp16x16() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP16x16 { mag: 655360, sign: false }),Option::Some(FP16x16 { mag: 65536, sign: false }), Option::Some(354145)); + let y_0 = TensorTrait::random_uniform_like( + @input_0, + Option::Some(FP16x16 { mag: 655360, sign: false }), + Option::Some(FP16x16 { mag: 65536, sign: false }), + Option::Some(354145) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/random_uniform_like_fp8x23.cairo b/tests/nodes/random_uniform_like_fp8x23.cairo index 06c1ad47d..b4192e536 100644 --- a/tests/nodes/random_uniform_like_fp8x23.cairo +++ b/tests/nodes/random_uniform_like_fp8x23.cairo @@ -15,7 +15,12 @@ fn test_random_uniform_like_fp8x23() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { mag: 8388608, sign: false }), Option::Some(354145)); + let y_0 = TensorTrait::random_uniform_like( + @input_0, + Option::Some(FP8x23 { mag: 83886080, sign: false }), + Option::Some(FP8x23 { mag: 8388608, sign: false }), + Option::Some(354145) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/range_fp16x16.cairo 
b/tests/nodes/range_fp16x16.cairo index efacd031c..331a4fc61 100644 --- a/tests/nodes/range_fp16x16.cairo +++ b/tests/nodes/range_fp16x16.cairo @@ -13,7 +13,11 @@ use orion::numbers::{FixedTrait, FP16x16}; fn test_range_fp16x16() { let z_0 = output_0::output_0(); - let y_0 = TensorTrait::range(FP16x16 { mag: 65536, sign: false },FP16x16 { mag: 1638400, sign: false },FP16x16 { mag: 196608, sign: false }); + let y_0 = TensorTrait::range( + FP16x16 { mag: 65536, sign: false }, + FP16x16 { mag: 1638400, sign: false }, + FP16x16 { mag: 196608, sign: false } + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/range_fp8x23.cairo b/tests/nodes/range_fp8x23.cairo index c299a96ab..ac218e22b 100644 --- a/tests/nodes/range_fp8x23.cairo +++ b/tests/nodes/range_fp8x23.cairo @@ -13,7 +13,11 @@ use orion::numbers::{FixedTrait, FP8x23}; fn test_range_fp8x23() { let z_0 = output_0::output_0(); - let y_0 = TensorTrait::range(FP8x23 { mag: 8388608, sign: false },FP8x23 { mag: 41943040, sign: false },FP8x23 { mag: 2516582, sign: false }); + let y_0 = TensorTrait::range( + FP8x23 { mag: 8388608, sign: false }, + FP8x23 { mag: 41943040, sign: false }, + FP8x23 { mag: 2516582, sign: false } + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/range_i32.cairo b/tests/nodes/range_i32.cairo index 786094089..90580f17c 100644 --- a/tests/nodes/range_i32.cairo +++ b/tests/nodes/range_i32.cairo @@ -13,7 +13,7 @@ use orion::numbers::NumberTrait; fn test_range_i32() { let z_0 = output_0::output_0(); - let y_0 = TensorTrait::range(21,2,-3); + let y_0 = TensorTrait::range(21, 2, -3); assert_eq(y_0, z_0); } diff --git a/tests/nodes/range_i8.cairo b/tests/nodes/range_i8.cairo index 90c9917cd..088a2cd7d 100644 --- a/tests/nodes/range_i8.cairo +++ b/tests/nodes/range_i8.cairo @@ -13,7 +13,7 @@ use orion::numbers::NumberTrait; fn test_range_i8() { let z_0 = output_0::output_0(); - let y_0 = TensorTrait::range(-1,25,3); + let y_0 = TensorTrait::range(-1, 25, 3); assert_eq(y_0, z_0); } diff --git a/tests/nodes/range_u32.cairo b/tests/nodes/range_u32.cairo index fcaa30ca0..aacb50f4b 100644 --- a/tests/nodes/range_u32.cairo +++ b/tests/nodes/range_u32.cairo @@ -13,7 +13,7 @@ use orion::numbers::NumberTrait; fn test_range_u32() { let z_0 = output_0::output_0(); - let y_0 = TensorTrait::range(1,25,3); + let y_0 = TensorTrait::range(1, 25, 3); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo index 1ae01e30b..e98d95345 100644 --- a/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo +++ b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_1_6() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![1].span(), array![5].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![1].span(), array![5].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo index 3fc2a4d28..96ddce2b2 100644 --- a/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo +++ b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_2_4() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = 
input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![2,2,2,2].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![2, 2, 2, 2].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo index 254ade4de..b22a61862 100644 --- a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_3x9_batch() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![7,8,9].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![3].span(), array![7, 8, 9].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo index aa8667bdd..f1e961f08 100644 --- a/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_3x9_time() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![9].span(), array![3,2,3,2,3,2,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![9].span(), array![3, 2, 3, 2, 3, 2, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo index 053f187e1..e1986a7b7 100644 --- a/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo +++ b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_different_dimensions_4_5() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![5,4,3,2].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![5, 4, 3, 2].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo index 9dcfd511c..697231f17 100644 --- a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_2d_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo index 518db31ca..af77e1a55 100644 
--- a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_2d_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo index c1bcdbc6a..858c62e43 100644 --- a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo index c2f27748e..4b576add6 100644 --- a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_fp16x16_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo index 350607eae..85a21abf1 100644 --- a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_i32_2d_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo index dd47c062e..489ebf8da 100644 --- a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_i32_2d_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); 
assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo index 86f1855e4..4ab9f9559 100644 --- a/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo @@ -16,7 +16,12 @@ fn test_reverse_sequence_i32_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo index 28c9d0a84..2280f61c3 100644 --- a/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo @@ -16,7 +16,12 @@ fn test_reverse_sequence_i32_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo index 7a9ebc438..a2f3e974c 100644 --- a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_2d_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo index e99616d89..bbb7556ce 100644 --- a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_2d_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo index 9b5afdecf..63434d7df 100644 --- a/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo @@ -15,7 +15,12 @@ fn test_reverse_sequence_i8_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + 
.reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo index a803ef02e..0f10bac72 100644 --- a/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo @@ -16,7 +16,12 @@ fn test_reverse_sequence_i8_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_time_equal_parts.cairo b/tests/nodes/reverse_sequence_time_equal_parts.cairo index a79efe4af..8e102a0ab 100644 --- a/tests/nodes/reverse_sequence_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_time_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo index 9dcfc9735..ce8e125b9 100644 --- a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_2d_batch_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo index d89e73242..d2d9606b0 100644 --- a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo +++ b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_2d_time_equal_parts() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_3x3_batch.cairo b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo index e8ff9ca9d..a95260c33 100644 --- a/tests/nodes/reverse_sequence_u32_3x3_batch.cairo +++ b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_3x3_batch() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![3,1,2].span()), 
Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![3].span(), array![3, 1, 2].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_3x3_time.cairo b/tests/nodes/reverse_sequence_u32_3x3_time.cairo index ac2b62361..12978fe32 100644 --- a/tests/nodes/reverse_sequence_u32_3x3_time.cairo +++ b/tests/nodes/reverse_sequence_u32_3x3_time.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_3x3_time() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![1,3,3].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![3].span(), array![1, 3, 3].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_4x4_batch.cairo b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo index ce124a89e..dafd62951 100644 --- a/tests/nodes/reverse_sequence_u32_4x4_batch.cairo +++ b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_4x4_batch() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![1, 2, 3, 4].span()), + Option::Some(0), + Option::Some(1) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_4x4_time.cairo b/tests/nodes/reverse_sequence_u32_4x4_time.cairo index a0d9ca889..d7e42272f 100644 --- a/tests/nodes/reverse_sequence_u32_4x4_time.cairo +++ b/tests/nodes/reverse_sequence_u32_4x4_time.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_4x4_time() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![4].span(), array![4, 3, 2, 1].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reverse_sequence_u32_zero_size.cairo b/tests/nodes/reverse_sequence_u32_zero_size.cairo index cabda40b5..34f18742d 100644 --- a/tests/nodes/reverse_sequence_u32_zero_size.cairo +++ b/tests/nodes/reverse_sequence_u32_zero_size.cairo @@ -14,7 +14,12 @@ fn test_reverse_sequence_u32_zero_size() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![0].span(), array![].span()), Option::Some(1), Option::Some(0)); + let y_0 = input_0 + .reverse_sequence( + TensorTrait::::new(array![0].span(), array![].span()), + Option::Some(1), + Option::Some(0) + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo index 95b09a56d..d182c89d1 100644 --- a/tests/nodes/scatter_nd_fp16x16_3d_add.cairo +++ b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_add() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add')); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/scatter_nd_fp16x16_3d_default.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo index 80bb892de..5cec477a9 100644 --- a/tests/nodes/scatter_nd_fp16x16_3d_default.cairo +++ b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo @@ -20,7 +20,7 @@ fn test_scatter_nd_fp16x16_3d_default() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo index 84e99545c..5399cffa4 100644 --- a/tests/nodes/scatter_nd_fp16x16_3d_max.cairo +++ b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_max() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo index 9ee1c89b6..51f848437 100644 --- a/tests/nodes/scatter_nd_fp16x16_3d_min.cairo +++ b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_min() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo index 2d5716d98..64fcc74f4 100644 --- a/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp16x16_3d_mul() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo index 3b748994a..f24e9bcd5 100644 --- a/tests/nodes/scatter_nd_fp8x23_3d_add.cairo +++ b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_add() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo index 75dc57f69..36e7ab220 100644 --- a/tests/nodes/scatter_nd_fp8x23_3d_default.cairo +++ b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo @@ -20,7 +20,7 @@ fn test_scatter_nd_fp8x23_3d_default() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max.cairo 
b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo index d09351807..360dc10a3 100644 --- a/tests/nodes/scatter_nd_fp8x23_3d_max.cairo +++ b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_max() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo index dadc8d27d..39ba6a903 100644 --- a/tests/nodes/scatter_nd_fp8x23_3d_min.cairo +++ b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_min() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo index 853780f6c..bb1756dc2 100644 --- a/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo @@ -20,7 +20,8 @@ fn test_scatter_nd_fp8x23_3d_mul() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_u32_add.cairo b/tests/nodes/scatter_nd_u32_add.cairo index cf3c4018b..c1112d27d 100644 --- a/tests/nodes/scatter_nd_u32_add.cairo +++ b/tests/nodes/scatter_nd_u32_add.cairo @@ -18,7 +18,8 @@ fn test_scatter_nd_u32_add() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('add')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_u32_default.cairo b/tests/nodes/scatter_nd_u32_default.cairo index 076d44277..3d832f243 100644 --- a/tests/nodes/scatter_nd_u32_default.cairo +++ b/tests/nodes/scatter_nd_u32_default.cairo @@ -18,7 +18,7 @@ fn test_scatter_nd_u32_default() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + let y_0 = input_0.scatter_nd(updates: input_1, indices: input_2, reduction: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_u32_max.cairo b/tests/nodes/scatter_nd_u32_max.cairo index 5d3a4940d..e64e2b84a 100644 --- a/tests/nodes/scatter_nd_u32_max.cairo +++ b/tests/nodes/scatter_nd_u32_max.cairo @@ -18,7 +18,8 @@ fn test_scatter_nd_u32_max() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('max')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_u32_min.cairo b/tests/nodes/scatter_nd_u32_min.cairo index 63033d6b7..51b7cb9af 100644 --- a/tests/nodes/scatter_nd_u32_min.cairo +++ b/tests/nodes/scatter_nd_u32_min.cairo @@ -18,7 +18,8 @@ 
fn test_scatter_nd_u32_min() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('min')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_nd_u32_mul.cairo b/tests/nodes/scatter_nd_u32_mul.cairo index b5367e914..8d19773d9 100644 --- a/tests/nodes/scatter_nd_u32_mul.cairo +++ b/tests/nodes/scatter_nd_u32_mul.cairo @@ -18,7 +18,8 @@ fn test_scatter_nd_u32_mul() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + let y_0 = input_0 + .scatter_nd(updates: input_1, indices: input_2, reduction: Option::Some('mul')); assert_eq(y_0, z_0); } diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo index a1ac7a9ec..c585612c3 100644 --- a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_1d_equal_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo index 42f0dc900..b10b573e3 100644 --- a/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_1d_uneven() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span()))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span())) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo index 1c3faf614..010736d45 100644 --- a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_1d_variable_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some( + TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo index 96e743399..7e740f9d7 100644 --- a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_2d_equal_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: 
array![1].span(), data: array![2].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo index bfd1f8cec..2ca6dd030 100644 --- a/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_fp16x16_2d_uneven() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo index 5cd4e1845..9cdd54aa0 100644 --- a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_2d_variable_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some( + TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo index e8ecfba30..a9f7bed3e 100644 --- a/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo +++ b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_fp16x16_zero_size() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some( + TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo index 9c14470b5..74995267f 100644 --- a/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo +++ b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_1d_equal_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_1d_uneven.cairo b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo index 0dfb5547f..f72378a1e 100644 --- a/tests/nodes/split_to_sequence_u32_1d_uneven.cairo +++ b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_1d_uneven() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: 
array![1].span(), data: array![4].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo index 4df4fbee7..62a16a2e3 100644 --- a/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo +++ b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_1d_variable_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some( + TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo index 24c06c857..8cd2fa373 100644 --- a/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo +++ b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_2d_equal_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_2d_uneven.cairo b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo index 7ab6604be..839576e6d 100644 --- a/tests/nodes/split_to_sequence_u32_2d_uneven.cairo +++ b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo @@ -14,7 +14,12 @@ fn test_split_to_sequence_u32_2d_uneven() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo index dc81b4325..1bb3b017b 100644 --- a/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo +++ b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_2d_variable_parts() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + let y = input_0 + .split_to_sequence( + 1, + 1, + Option::Some( + TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/nodes/split_to_sequence_u32_zero_size.cairo b/tests/nodes/split_to_sequence_u32_zero_size.cairo index 815ba7d4e..9f24d154d 100644 --- a/tests/nodes/split_to_sequence_u32_zero_size.cairo +++ b/tests/nodes/split_to_sequence_u32_zero_size.cairo @@ -14,7 +14,14 @@ fn test_split_to_sequence_u32_zero_size() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 
0, 0].span(),))); + let y = input_0 + .split_to_sequence( + 0, + 1, + Option::Some( + TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),) + ) + ); assert_seq_eq(y, z); } diff --git a/tests/operators/optional/optional_get_element_test.cairo b/tests/operators/optional/optional_get_element_test.cairo index 576e12cd6..ef6c68097 100644 --- a/tests/operators/optional/optional_get_element_test.cairo +++ b/tests/operators/optional/optional_get_element_test.cairo @@ -14,17 +14,7 @@ fn optional_get_element_i8_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let ele = optional_get_element(a.optional()); @@ -67,4 +57,4 @@ fn optional_get_element_fp16x16_test() { assert(*(ele.data).at(5) == *(a.data).at(5), 'ele[5] == a[5]'); assert(*(ele.data).at(6) == *(a.data).at(6), 'ele[6] == a[6]'); assert(*(ele.data).at(7) == *(a.data).at(7), 'ele[7] == a[7]'); -} \ No newline at end of file +} diff --git a/tests/operators/optional/optional_has_element_test.cairo b/tests/operators/optional/optional_has_element_test.cairo index f08bdcc73..7aedbb9bd 100644 --- a/tests/operators/optional/optional_has_element_test.cairo +++ b/tests/operators/optional/optional_has_element_test.cairo @@ -14,17 +14,7 @@ fn optional_has_element_i8_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let a_optional = a.optional(); let has_ele = optional_has_element(a_optional); @@ -64,4 +54,4 @@ fn optional_has_element_none_test() { let has_ele = optional_has_element(a); assert(*(has_ele.data).at(0) == false, 'has_ele[0] == false'); -} \ No newline at end of file +} diff --git a/tests/operators/optional/optional_test.cairo b/tests/operators/optional/optional_test.cairo index 3632e173a..06c31e3b4 100644 --- a/tests/operators/optional/optional_test.cairo +++ b/tests/operators/optional/optional_test.cairo @@ -14,28 +14,42 @@ fn optional_i8_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let a_optional = a.optional(); - assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]'); - assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]'); - assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]'); - assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]'); - assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]'); - assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]'); - assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]'); - assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]'); + assert( + *(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), + 'a_optional[0] == Option(a)[0]' + ); + assert( + *(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), + 'a_optional[1] == Option(a)[1]' + ); + assert( + 
*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), + 'a_optional[2] == Option(a)[2]' + ); + assert( + *(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), + 'a_optional[3] == Option(a)[3]' + ); + assert( + *(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), + 'a_optional[4] == Option(a)[4]' + ); + assert( + *(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), + 'a_optional[5] == Option(a)[5]' + ); + assert( + *(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), + 'a_optional[6] == Option(a)[6]' + ); + assert( + *(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), + 'a_optional[7] == Option(a)[7]' + ); } #[test] @@ -59,12 +73,36 @@ fn optional_fp16x16_test() { ); let a_optional = a.optional(); - assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]'); - assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]'); - assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]'); - assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]'); - assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]'); - assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]'); - assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]'); - assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]'); -} \ No newline at end of file + assert( + *(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), + 'a_optional[0] == Option(a)[0]' + ); + assert( + *(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), + 'a_optional[1] == Option(a)[1]' + ); + assert( + *(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), + 'a_optional[2] == Option(a)[2]' + ); + assert( + *(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), + 'a_optional[3] == Option(a)[3]' + ); + assert( + *(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), + 'a_optional[4] == Option(a)[4]' + ); + assert( + *(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), + 'a_optional[5] == Option(a)[5]' + ); + assert( + *(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), + 'a_optional[6] == Option(a)[6]' + ); + assert( + *(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), + 'a_optional[7] == Option(a)[7]' + ); +} diff --git a/tests/performance.cairo b/tests/performance.cairo index da71869ed..14012d1cb 100644 --- a/tests/performance.cairo +++ b/tests/performance.cairo @@ -1,3 +1,3 @@ mod quantize_linear_test; mod dequantize_linear_test; -mod dynamic_quantize_linear_test; \ No newline at end of file +mod dynamic_quantize_linear_test; diff --git a/tests/performance/dynamic_quantize_linear_test.cairo b/tests/performance/dynamic_quantize_linear_test.cairo index bbd43eb29..dc6c5315b 100644 --- a/tests/performance/dynamic_quantize_linear_test.cairo +++ b/tests/performance/dynamic_quantize_linear_test.cairo @@ -1 +1 @@ -mod dynamic_quantize_linear_fp_test; \ No newline at end of file +mod dynamic_quantize_linear_fp_test; diff --git a/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo index 
e1817dff9..c16277b85 100644
--- a/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo
+++ b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo
@@ -19,11 +19,11 @@ mod fp8x23 {
         shape.append(6);
         let mut data = ArrayTrait::<FP8x23>::new();
         data.append(FixedTrait::new(0, false));
-        data.append(FixedTrait::new(587203, false)); // 0.07
-        data.append(FixedTrait::new(838861, false)); // 0.1
-        data.append(FixedTrait::new(1677722, false)); // 0.2
-        data.append(FixedTrait::new(4194304, false)); // 0.5
-        data.append(FixedTrait::new(7549747, false)); // 0.9
+        data.append(FixedTrait::new(587203, false)); // 0.07
+        data.append(FixedTrait::new(838861, false)); // 0.1
+        data.append(FixedTrait::new(1677722, false)); // 0.2
+        data.append(FixedTrait::new(4194304, false)); // 0.5
+        data.append(FixedTrait::new(7549747, false)); // 0.9

         let x = TensorTrait::new(shape.span(), data.span());

@@ -61,12 +61,12 @@ mod fp16x16 {
         let mut shape = ArrayTrait::<u32>::new();
         shape.append(6);
         let mut data = ArrayTrait::<FP16x16>::new();
-        data.append(FixedTrait::new(10945, false)); // 0.167
-        data.append(FixedTrait::new(190054, false)); // 2.9
-        data.append(FixedTrait::new_unscaled(3, false)); // 3.0
-        data.append(FixedTrait::new(229376, false)); // 3.5
-        data.append(FixedTrait::new_unscaled(3, true)); // -3.0
-        data.append(FixedTrait::new(229376, true)); // -3.5
+        data.append(FixedTrait::new(10945, false)); // 0.167
+        data.append(FixedTrait::new(190054, false)); // 2.9
+        data.append(FixedTrait::new_unscaled(3, false)); // 3.0
+        data.append(FixedTrait::new(229376, false)); // 3.5
+        data.append(FixedTrait::new_unscaled(3, true)); // -3.0
+        data.append(FixedTrait::new(229376, true)); // -3.5

         let x = TensorTrait::new(shape.span(), data.span());

From 677168cffd5d17bd4d3fad9146e5d9abd95611d8 Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Mon, 19 Feb 2024 19:24:53 +0100
Subject: [PATCH 23/40] remove shape length check

---
 src/operators/tensor/helpers.cairo | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo
index 931eeb0af..e60b38b72 100644
--- a/src/operators/tensor/helpers.cairo
+++ b/src/operators/tensor/helpers.cairo
@@ -51,8 +51,6 @@ fn check_shape<T>(shape: Span<usize>, data: Span<T>) {
 /// # Panics
 /// * Panics if the shapes are not compatible for broadcasting.
 fn check_compatibility(mut shape_1: Span<usize>, mut shape_2: Span<usize>) {
-    assert(shape_1.len() == shape_2.len(), 'tensors shape must match');
-
     loop {
         match shape_1.pop_front() {
             Option::Some(shape_1_val) => {

From 39c66d66473f1f0f70e04aa12a194a2baf4ea0d9 Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Tue, 20 Feb 2024 12:17:16 +0100
Subject: [PATCH 24/40] fix check_compatibility

---
 src/operators/tensor/helpers.cairo | 42 +++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 12 deletions(-)

diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo
index e60b38b72..13eb7f43a 100644
--- a/src/operators/tensor/helpers.cairo
+++ b/src/operators/tensor/helpers.cairo
@@ -51,19 +51,37 @@ fn check_shape<T>(shape: Span<usize>, data: Span<T>) {
 /// # Panics
 /// * Panics if the shapes are not compatible for broadcasting.
 fn check_compatibility(mut shape_1: Span<usize>, mut shape_2: Span<usize>) {
-    loop {
-        match shape_1.pop_front() {
-            Option::Some(shape_1_val) => {
-                let shape_2_val = *shape_2.pop_front().unwrap();
-
-                assert(
-                    *shape_1_val == shape_2_val || *shape_1_val == 1 || shape_2_val == 1,
-                    'tensors shape must match'
-                );
-            },
-            Option::None => { break; }
+    // Start from the last dimension by getting the length of each shape
+    let mut iter_1 = shape_1.len();
+    let mut iter_2 = shape_2.len();
+
+    // Iterate while there are dimensions left in either shape
+    while iter_1 > 0 || iter_2 > 0 {
+        // Get the current dimension for each shape, defaulting to 1 if we've run out of dimensions
+        let dim_1 = if iter_1 > 0 {
+            *shape_1[iter_1 - 1]
+        } else {
+            1
         };
-    };
+        let dim_2 = if iter_2 > 0 {
+            *shape_2[iter_2 - 1]
+        } else {
+            1
+        };
+
+        // Check the broadcasting rule for the current dimension
+        if dim_1 != dim_2 && dim_1 != 1 && dim_2 != 1 {
+            panic(array!['tensors shape must match']);
+        }
+
+        // Move to the next dimension
+        if iter_1 > 0 {
+            iter_1 -= 1;
+        }
+        if iter_2 > 0 {
+            iter_2 -= 1;
+        }
+    }
 }

 /// Computes the index in the broadcasted tensor corresponding to the given indices and shape.
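For orientation between these two patches: the rule the rewritten check_compatibility enforces is NumPy/ONNX-style broadcasting. Shapes are aligned from the last dimension, a dimension that has run out counts as 1, and each aligned pair must be equal or contain a 1. A pure-Python sketch of the same loop (illustration only, not part of the patch; the compatible() helper is hypothetical):

    # Mirrors the new check_compatibility: walk both shapes from the
    # last dimension, treating dimensions that have run out as 1.
    def compatible(shape_1, shape_2):
        iter_1, iter_2 = len(shape_1), len(shape_2)
        while iter_1 > 0 or iter_2 > 0:
            dim_1 = shape_1[iter_1 - 1] if iter_1 > 0 else 1
            dim_2 = shape_2[iter_2 - 1] if iter_2 > 0 else 1
            if dim_1 != dim_2 and dim_1 != 1 and dim_2 != 1:
                return False  # the Cairo version panics here
            iter_1 = max(iter_1 - 1, 0)
            iter_2 = max(iter_2 - 1, 0)
        return True

    assert compatible([3, 2, 2], [2])        # ranks differ, still broadcastable
    assert compatible([3, 1, 2], [3, 5, 2])  # a 1 stretches to 5
    assert not compatible([3, 2, 2], [3])    # trailing 3 vs 2 is incompatible

The rank-mismatch cases only reach this code because PATCH 23 dropped the up-front length-equality assert; before that, a vector bias of shape [4] could never be checked against [2, 4] at all.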
From e9cf5a7c3bb209208e915d0ec0ca6c3c626e7d48 Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Tue, 20 Feb 2024 20:07:56 +0100
Subject: [PATCH 25/40] fix broadcast_shape and broadcast_index_mapping

---
 src/operators/nn/functional/gemm.cairo     | 13 +---
 src/operators/tensor/core.cairo            | 26 ++-----
 src/operators/tensor/helpers.cairo         | 74 ++++++++++++++-----
 tests/nodes/gemm_default_vector_bias.cairo |  1 -
 .../gemm_default_vector_bias/input_2.cairo |  1 -
 5 files changed, 64 insertions(+), 51 deletions(-)

diff --git a/src/operators/nn/functional/gemm.cairo b/src/operators/nn/functional/gemm.cairo
index c37bda880..e5b997731 100644
--- a/src/operators/nn/functional/gemm.cairo
+++ b/src/operators/nn/functional/gemm.cairo
@@ -1,4 +1,3 @@
-use alexandria_data_structures::array_ext::SpanTraitExt;
 use core::array::SpanTrait;

 use orion::numbers::NumberTrait;
@@ -49,16 +48,8 @@ fn gemm<

     match C {
         Option::Some(c) => {
-            let broadcast_c_shape = if c.shape.len() == 1 {
-                array![1].span().concat(c.shape)
-            } else {
-                c.shape
-            };
-
-            let c = Tensor { shape: broadcast_c_shape, data: c.data };
-
             return mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta);
         },
-        Option::None => { return mul_by_scalar(@A.matmul(@B), alpha); }
+        Option::None(_) => { return mul_by_scalar(@A.matmul(@B), alpha); }
     }
-}
+}
\ No newline at end of file

diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo
index 222b0f423..9342bf328 100644
--- a/src/operators/tensor/core.cairo
+++ b/src/operators/tensor/core.cairo
@@ -1,3 +1,4 @@
+use alexandria_data_structures::array_ext::ArrayTraitExt;
 use core::array::{ArrayTrait, SpanTrait};
 use core::serde::Serde;
 use core::option::OptionTrait;
@@ -5743,33 +5744,22 @@ fn unravel_index(index: usize, mut shape: Span<usize>) -> Span<usize> {

 /// Cf: TensorTrait::stride docstring
 fn stride(mut shape: Span<usize>) -> Span<usize> {
-    let shape_len = shape.len();
-    assert(shape_len > 0, 'shape cannot be empty');
-
-    let mut result: Array<usize> = ArrayTrait::new();
-    let mut accumulated: usize = 1;
-    let mut temp_result = ArrayTrait::new();
+    let mut strides = ArrayTrait::new();
+    let mut stride = 1;
     loop {
         match shape.pop_back() {
-            Option::Some(i) => {
-                temp_result.append(accumulated);
-                accumulated *= *i;
+            Option::Some(size) => {
+                strides.append(stride);
+                stride *= *size;
             },
             Option::None => { break; }
         };
     };

-    let mut temp_result = temp_result.span();
-    loop {
-        match temp_result.pop_back() {
-            Option::Some(val) => { result.append(*val); },
-            Option::None => { break; }
-        };
-    };
-
-    return result.span();
+    strides.reverse().span()
 }
+
 /// Cf: TensorTrait::reshape docstring
 fn reshape<T>(self: @Tensor<T>, target_shape: Span<usize>) -> Tensor<T> {
     new_tensor(target_shape, *self.data)
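The new stride walks the shape once from the back, accumulating the element count, and reverses the result in one step (the new ArrayTraitExt import supplies reverse()), instead of building a temporary span and copying it back in a second loop. The same computation in Python, for reference (a sketch, not part of the patch):

    # Row-major strides: each stride is the number of elements one
    # step along that axis spans. For shape [3, 2, 2] that is [4, 2, 1].
    def stride(shape):
        strides, acc = [], 1
        for size in reversed(shape):
            strides.append(acc)
            acc *= size
        return strides[::-1]

    assert stride([3, 2, 2]) == [4, 2, 1]
    assert stride([6]) == [1]
    assert stride([]) == []  # the dropped assert means an empty shape no longer panics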
diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo
index 13eb7f43a..a781be259 100644
--- a/src/operators/tensor/helpers.cairo
+++ b/src/operators/tensor/helpers.cairo
@@ -97,7 +97,15 @@ fn check_compatibility(mut shape_1: Span<usize>, mut shape_2: Span<usize>) {
 /// # Returns
 /// * A usize representing the index in the broadcasted tensor.
 fn broadcast_index_mapping(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
-    assert(shape.len() == indices.len(), 'shape/indices len must be equal');
+    if shape.len() == indices.len() {
+        broadcast_index_mapping_equal_shape(shape, indices)
+    } else {
+        broadcast_index_mapping_non_equal_shape(shape, indices)
+    }
+}
+
+
+fn broadcast_index_mapping_equal_shape(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
     let mut result = 0_usize;
     let mut stride = stride(shape);

@@ -117,6 +125,47 @@ fn broadcast_index_mapping(mut shape: Span<usize>, mut indices: Span<usize>) ->
     return result;
 }

+fn broadcast_index_mapping_non_equal_shape(
+    mut shape: Span<usize>, mut indices: Span<usize>
+) -> usize {
+    let mut result = 0_usize;
+    let mut stride = stride(shape.clone());
+
+    // Calculate the offset to align indices with the rightmost dimensions of the shape
+    let mut offset = if shape.len() > indices.len() {
+        shape.len() - indices.len()
+    } else {
+        0
+    };
+
+    loop {
+        match shape.pop_back() {
+            Option::Some(_) => {
+                let stride_val = stride
+                    .pop_back()
+                    .unwrap_or(@1); // Default stride for non-existent dimensions is 1
+
+                // Calculate the index, using 0 for dimensions beyond the length of indices
+                let index_val = if offset > 0 {
+                    offset -= 1; // Decrement offset until we align indices with the shape
+                    0 // Use 0 for indices beyond the length of the indices span
+                } else {
+                    *indices
+                        .pop_back()
+                        .unwrap_or(@0) // Use actual index value or 0 if indices are exhausted
+                };
+
+                let index = index_val * *stride_val;
+                result += index;
+            },
+            Option::None => { break; }
+        };
+    };
+
+    result
+}
+

 /// Generates the output shape after reducing a tensor along a specified axis.
 ///
 /// # Arguments
@@ -272,32 +321,17 @@ fn broadcast_shape(mut shape1: Span<usize>, mut shape2: Span<usize>) -> Span<usize> {
     let mut result: Array<usize> = ArrayTrait::new();
-    loop {
-        let mut dim1 = 1;
-        let mut dim2 = 1;
-
-        match shape1.pop_front() {
-            Option::Some(item) => { dim1 = *item; },
-            Option::None => { if shape1.len() == 0 && shape2.len() == 0 {
-                break ();
-            }; }
-        };
-
-        match shape2.pop_front() {
-            Option::Some(item) => { dim2 = *item; },
-            Option::None => { if shape1.len() == 0 && shape2.len() == 0 {
-                break ();
-            }; }
-        };
+    while !shape1.is_empty() || !shape2.is_empty() {
+        let dim1 = *shape1.pop_back().unwrap_or(@1);
+        let dim2 = *shape2.pop_back().unwrap_or(@1);

         let broadcasted_dim = u32_max(dim1, dim2);
         result.append(broadcasted_dim);
     };

-    return result.span();
+    return result.reverse().span();
 }
-
 /// Substitute a value in a shape at a given index
 ///
 /// # Arguments
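Both fixes here are about alignment. The old broadcast_shape consumed dimensions from the front, so a rank mismatch paired the wrong axes: [2, 3] against [3] compared 2 with 3 on the first step, which is the wrong pairing under NumPy/ONNX rules. The rewrite pops from the back, defaults an exhausted shape to 1, and reverses once at the end, and broadcast_index_mapping gains a dedicated path for unequal ranks in the same spirit. A Python sketch of the fixed shape computation (illustration only, not part of the patch):

    # Consume dimensions from the back, defaulting to 1 once a shape is
    # exhausted, and keep the larger of each pair (input validity is
    # checked separately by check_compatibility).
    def broadcast_shape(shape1, shape2):
        s1, s2, out = list(shape1), list(shape2), []
        while s1 or s2:
            dim1 = s1.pop() if s1 else 1
            dim2 = s2.pop() if s2 else 1
            out.append(max(dim1, dim2))
        return out[::-1]

    assert broadcast_shape([2, 3], [3]) == [2, 3]
    assert broadcast_shape([3, 2, 2], [2]) == [3, 2, 2]
    assert broadcast_shape([2, 1], [1, 3]) == [2, 3]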
diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo
index 24826f739..fbed99929 100644
--- a/tests/nodes/gemm_default_vector_bias.cairo
+++ b/tests/nodes/gemm_default_vector_bias.cairo
@@ -3,7 +3,6 @@ mod input_1;
 mod input_2;
 mod output_0;
-
 use orion::operators::nn::NNTrait;
 use orion::numbers::FixedTrait;
 use orion::utils::{assert_eq, assert_seq_eq};

diff --git a/tests/nodes/gemm_default_vector_bias/input_2.cairo b/tests/nodes/gemm_default_vector_bias/input_2.cairo
index f340a6ea2..e3d351aff 100644
--- a/tests/nodes/gemm_default_vector_bias/input_2.cairo
+++ b/tests/nodes/gemm_default_vector_bias/input_2.cairo
@@ -5,7 +5,6 @@ use orion::numbers::{FixedTrait, FP16x16};

 fn input_2() -> Tensor<FP16x16> {
     let mut shape = ArrayTrait::<usize>::new();
-    shape.append(1);
     shape.append(4);

     let mut data = ArrayTrait::new();

From b3348955c285b86bc2128e71951b9e5cf73e3cd7 Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Thu, 22 Feb 2024 10:17:00 +0100
Subject: [PATCH 26/40] refactor unsqueeze to use axes of Span<i32>

---
 src/operators/tensor/core.cairo               |   12 +-
 .../tensor/implementations/tensor_bool.cairo  |    2 +-
 .../implementations/tensor_complex64.cairo    |    2 +-
 .../implementations/tensor_fp16x16.cairo      |    2 +-
 .../implementations/tensor_fp16x16wide.cairo  |    2 +-
 .../implementations/tensor_fp32x32.cairo      |    2 +-
 .../implementations/tensor_fp64x64.cairo      |    2 +-
 .../implementations/tensor_fp8x23.cairo       |    2 +-
 .../implementations/tensor_fp8x23wide.cairo   |    2 +-
 .../tensor/implementations/tensor_i32.cairo   |    2 +-
 .../tensor/implementations/tensor_i8.cairo    |    2 +-
 .../tensor/implementations/tensor_u32.cairo   |    2 +-
 tests/nodes.cairo                             | 2082 ++++++++---------
 13 files changed, 1058 insertions(+), 1058 deletions(-)

diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo
index 9342bf328..bc93491b8 100644
--- a/src/operators/tensor/core.cairo
+++ b/src/operators/tensor/core.cairo
@@ -3262,7 +3262,7 @@ trait TensorTrait<T> {
     /// [7]]]]
     /// ```
     ///
-    fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
+    fn unsqueeze(self: @Tensor<T>, axes: Span<i32>) -> Tensor<T>;
     /// # tensor.squeeze
     ///
     /// ```rust
@@ -6105,13 +6105,13 @@ fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T> {
     return Tensor::<T> { shape: target_shape, data: *self.data };
 }

 /// Cf: TensorTrait::unsqueeze docstring
-fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
+fn unsqueeze<T>(self: @Tensor<T>, axes: Span<i32>) -> Tensor<T> {
     let dedupped_array = axes.dedup();
     assert(dedupped_array.len() == axes.len(), 'Duplicated input axes');

     let mut self_shape_copy = *self.shape;
-    let mut i: usize = 0;
-    let mut added_axes_count: usize = 0;
+    let mut i = 0;
+    let mut added_axes_count = 0;

     let mut output_shape: Array<usize> = ArrayTrait::new();
     loop {
         if axes.contains(i + added_axes_count) {
@@ -6128,9 +6128,9 @@ fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
         };
     };

-    let mut j: usize = output_shape.len();
+    let mut j = output_shape.len();
     loop {
-        if axes.contains(j) {
+        if axes.contains(j.into()) {
             output_shape.append(1);
         } else {
             break ();
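The functional change in this patch is only the element type of axes, usize to i32, which lines unsqueeze up with squeeze (whose axes are already Option<Span<i32>> in the context above) and with the ONNX Unsqueeze operator, where axes may be negative; the j.into() converts the usize cursor for the contains lookup. The negative-axis normalization itself is not visible in this hunk, so treat that motivation as an inference. For non-negative axes the semantics match NumPy's expand_dims, for reference:

    # Each value in `axes` names a position in the *output* shape that
    # receives a new dimension of size 1.
    import numpy as np

    x = np.arange(8).reshape(2, 4)
    y = np.expand_dims(x, axis=(0, 3))  # like unsqueeze with axes [0, 3]
    assert y.shape == (1, 2, 4, 1)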
diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo
index e75405743..cc17b93f2 100644
--- a/src/operators/tensor/implementations/tensor_bool.cairo
+++ b/src/operators/tensor/implementations/tensor_bool.cairo
@@ -244,7 +244,7 @@ impl BoolTensor of TensorTrait<bool> {
         panic(array!['not supported!'])
     }

-    fn unsqueeze(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
+    fn unsqueeze(self: @Tensor<bool>, axes: Span<i32>) -> Tensor<bool> {
         panic(array!['not supported!'])
     }

diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo
index 84b3edb21..cbd038244 100644
--- a/src/operators/tensor/implementations/tensor_complex64.cairo
+++ b/src/operators/tensor/implementations/tensor_complex64.cairo
@@ -326,7 +326,7 @@ impl Complex64Tensor of TensorTrait<complex64> {
         core_tensor::squeeze(self, axes)
     }

-    fn unsqueeze(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
+    fn unsqueeze(self: @Tensor<complex64>, axes: Span<i32>) -> Tensor<complex64> {
         core_tensor::unsqueeze(self, axes)
     }

diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo
index 05dd23ea2..243c141c2 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -364,7 +364,7 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
         core_tensor::squeeze(self, axes)
     }

-    fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
+    fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<i32>) -> Tensor<FP16x16> {
         core_tensor::unsqueeze(self, axes)
     }

diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
index 54aadbd3e..0b5c01c3e 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
@@ -324,7 +324,7 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
         core_tensor::squeeze(self, axes)
     }

-    fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
+    fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<i32>) -> Tensor<FP16x16W> {
         core_tensor::unsqueeze(self, axes)
     }

diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo
index 7402cd761..e3fccc502 100644
--- a/src/operators/tensor/implementations/tensor_fp32x32.cairo
+++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -364,7 +364,7 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
         core_tensor::squeeze(self, axes)
     }

-    fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
+    fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<i32>) -> Tensor<FP32x32> {
         core_tensor::unsqueeze(self, axes)
     }

diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo
index 4477b0025..b86413786 100644
--- a/src/operators/tensor/implementations/tensor_fp64x64.cairo
+++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -364,7 +364,7 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
         core_tensor::squeeze(self, axes)
     }

-    fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
+    fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<i32>) -> Tensor<FP64x64> {
         core_tensor::unsqueeze(self, axes)
     }

diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo
a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index c16b7feed..9450d9b86 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -365,7 +365,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::squeeze(self, axes) } - fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { + fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { core_ops::unsqueeze(self, axes) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 0e5ecc074..29387acbb 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -315,7 +315,7 @@ impl FP8x23WTensor of TensorTrait { core_tensor::squeeze(self, axes) } - fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { + fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { core_tensor::unsqueeze(self, axes) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 5e637d4ff..37a529a59 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -357,7 +357,7 @@ impl I32Tensor of TensorTrait { core_tensor::squeeze(self, axes) } - fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { + fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { core_tensor::unsqueeze(self, axes) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index a5f9476c1..328dfe78a 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -361,7 +361,7 @@ impl I8Tensor of TensorTrait { core_tensor::squeeze(self, axes) } - fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { + fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { core_tensor::unsqueeze(self, axes) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 00ab75b1d..65d54db63 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -304,7 +304,7 @@ impl U32Tensor of TensorTrait { core_tensor::squeeze(self, axes) } - fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { + fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { core_tensor::unsqueeze(self, axes) } diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 337715889..8814cfb80 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1041 +1,1041 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmax_fp16x16_1D_default; -// mod argmax_fp16x16_1D_keepdims_false; -// mod argmax_fp16x16_1D_last_index; -// mod argmax_fp16x16_2D_default; -// mod argmax_fp16x16_2D_keepdims_false; -// mod argmax_fp16x16_2D_last_index; -// mod argmax_fp16x16_3D_default; -// mod argmax_fp16x16_3D_keepdims_false; -// mod argmax_fp16x16_3D_last_index; -// mod argmax_fp8x23_1D_default; -// mod argmax_fp8x23_1D_keepdims_false; -// mod 
argmax_fp8x23_1D_last_index; -// mod argmax_fp8x23_2D_default; -// mod argmax_fp8x23_2D_keepdims_false; -// mod argmax_fp8x23_2D_last_index; -// mod argmax_fp8x23_3D_default; -// mod argmax_fp8x23_3D_keepdims_false; -// mod argmax_fp8x23_3D_last_index; -// mod argmax_i32_1D_default; -// mod argmax_i32_1D_keepdims_false; -// mod argmax_i32_1D_last_index; -// mod argmax_i32_2D_default; -// mod argmax_i32_2D_keepdims_false; -// mod argmax_i32_2D_last_index; -// mod argmax_i32_3D_default; -// mod argmax_i32_3D_keepdims_false; -// mod argmax_i32_3D_last_index; -// mod argmax_i8_1D_default; -// mod argmax_i8_1D_keepdims_false; -// mod argmax_i8_1D_last_index; -// mod argmax_i8_2D_default; -// mod argmax_i8_2D_keepdims_false; -// mod argmax_i8_2D_last_index; -// mod argmax_i8_3D_default; -// mod argmax_i8_3D_keepdims_false; -// mod argmax_i8_3D_last_index; -// mod argmax_u32_1D_default; -// mod argmax_u32_1D_keepdims_false; -// mod argmax_u32_1D_last_index; -// mod argmax_u32_2D_default; -// mod argmax_u32_2D_keepdims_false; -// mod argmax_u32_2D_last_index; -// mod argmax_u32_3D_default; -// mod argmax_u32_3D_keepdims_false; -// mod argmax_u32_3D_last_index; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; -// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// 
mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; -// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; -// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; -// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod 
matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; -// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod reduce_sum_fp16x16_1D; -// mod reduce_sum_fp16x16_2D_default; -// mod reduce_sum_fp16x16_2D_keepdims; -// mod reduce_sum_fp16x16_2D_axis_1; -// mod reduce_sum_fp8x23_1D; -// mod reduce_sum_fp8x23_2D_default; -// mod reduce_sum_fp8x23_2D_keepdims; -// mod reduce_sum_fp8x23_2D_axis_1; -// mod reduce_sum_i32_1D; -// mod reduce_sum_i32_2D_default; -// mod reduce_sum_i32_2D_keepdims; -// mod reduce_sum_i32_2D_axis_1; -// mod reduce_sum_i8_1D; -// mod reduce_sum_i8_2D_default; -// mod reduce_sum_i8_2D_keepdims; -// mod reduce_sum_i8_2D_axis_1; -// mod reduce_sum_u32_1D; -// mod reduce_sum_u32_2D_default; -// mod reduce_sum_u32_2D_keepdims; -// mod reduce_sum_u32_2D_axis_1; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod softmax_fp16x16; -// mod softmax_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; -// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod less_fp16x16; -// mod less_fp16x16_broadcast; -// mod less_fp8x23; -// mod less_fp8x23_broadcast; -// mod less_i32; -// mod less_i32_broadcast; -// mod less_i8; -// mod less_i8_broadcast; -// mod less_u32; -// mod less_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod gather_fp8x23_3d_default; -// mod gather_fp8x23_3d_axis1; -// mod gather_fp8x23_3d_axis2; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_i8_3d_default; -// mod gather_i8_3d_axis1; -// mod gather_i8_3d_axis2; -// mod gather_i32_3d_default; -// mod gather_i32_3d_axis1; -// mod gather_i32_3d_axis2; -// mod 
gather_u32_3d_default; -// mod gather_u32_3d_axis1; -// mod gather_u32_3d_axis2; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod 
scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; -// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; -// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// mod reduce_sum_square_fp8x23_export_keepdims; -// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod 
reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; -// mod reduce_prod_i32_2D_axis_1; -// mod reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod gather_elements_fp16x16_3d_default; -// mod gather_elements_fp16x16_3d_axis1; -// mod gather_elements_fp16x16_3d_axis2; -// mod gather_elements_fp8x23_3d_default; -// mod gather_elements_fp8x23_3d_axis1; -// mod gather_elements_fp8x23_3d_axis2; -// mod gather_elements_i8_3d_default; -// mod gather_elements_i8_3d_axis1; -// mod gather_elements_i32_3d_default; -// mod gather_elements_i32_3d_axis1; -// mod gather_elements_i32_3d_axis2; -// mod gather_elements_u32_default; -// mod gather_elements_u32_axis1; -// mod gather_elements_u32_axis2; -// mod gather_elements_u32_axis3; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod 
reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod sequence_empty_i32; -// mod sequence_empty_i8; -// mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; -// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; -// mod concat_from_sequence_i32_new_axis_default; -// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_nan_fp8x23; -// mod is_inf_fp16x16; -// mod is_inf_fp8x23; -// mod is_inf_i32; -// mod is_inf_i8; -// mod is_inf_u32; -// mod is_pos_inf_fp16x16; -// mod is_neg_inf_fp16x16; -// mod is_pos_inf_fp8x23; -// mod is_neg_inf_fp8x23; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -// mod is_pos_inf_i8; -// mod is_neg_inf_i8; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod reduce_log_sum_fp16x16_export_do_not_keepdims; -// 
mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod layer_normalization_default_axis; -// mod 
layer_normalization_4d_axis0; -// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; -// mod grid_sample; -// mod grid_sample_cubic; -// mod grid_sample_aligncorners; -// mod grid_sample_nearest; -// mod grid_sample_nearest_aligncorner; -// mod grid_sample_padding_border; -// mod grid_sample_padding_reflection; -// mod grid_sample_padding_zeros; -// mod col2im; -// mod col2im_5D; -// mod col2im_dilations; -// mod col2im_pads; -// mod col2im_strides; -// mod random_uniform_like_fp16x16; -// mod random_uniform_like_fp8x23; -// mod range_fp8x23; -// mod range_fp16x16; -// mod range_i32; -// mod range_i8; -// mod range_u32; -// mod hann_window_fp8x23; -// mod hann_window_fp16x16; -// mod hamming_window_fp16x16; -// mod hamming_window_fp8x23; -// mod blackman_window_fp16x16; -// mod blackman_window_fp8x23; -// mod split_to_sequence_fp16x16_1d_equal_parts; -// mod split_to_sequence_fp16x16_1d_variable_parts; -// mod split_to_sequence_fp16x16_2d_equal_parts; -// mod split_to_sequence_fp16x16_2d_variable_parts; -// mod split_to_sequence_fp16x16_zero_size; -// mod split_to_sequence_fp16x16_1d_uneven; -// mod split_to_sequence_fp16x16_2d_uneven; -// mod split_to_sequence_u32_1d_equal_parts; -// mod split_to_sequence_u32_1d_variable_parts; -// mod split_to_sequence_u32_2d_equal_parts; -// mod split_to_sequence_u32_2d_variable_parts; -// mod split_to_sequence_u32_zero_size; -// mod split_to_sequence_u32_1d_uneven; -// mod split_to_sequence_u32_2d_uneven; -// mod split_to_sequence_2d_scalar; -// mod split_to_sequence_2d_nokeepdims; -// mod split_to_sequence_1d_nokeepdims; -// mod reverse_sequence_fp16x16_batch_equal_parts; -// mod reverse_sequence_fp16x16_time_equal_parts; -// mod reverse_sequence_i32_batch_equal_parts; -// mod reverse_sequence_i32_time_equal_parts; -// mod reverse_sequence_i8_batch_equal_parts; -// mod reverse_sequence_i8_time_equal_parts; -// mod reverse_sequence_u32_4x4_batch; -// mod reverse_sequence_u32_4x4_time; -// mod reverse_sequence_u32_3x3_batch; -// mod reverse_sequence_u32_3x3_time; -// mod reverse_sequence_different_dimensions_4_5; -// mod reverse_sequence_different_dimensions_2_4; -// mod reverse_sequence_different_dimensions_1_6; -// mod reverse_sequence_different_dimensions_3x9_batch; -// mod reverse_sequence_different_dimensions_3x9_time; -// mod conv_transpose; -// mod conv_transpose_1d; -// mod conv_transpose_3d; -// mod conv_transpose_attributes; -// mod conv_transpose_autopad_same; -// mod 
conv_transpose_dilations; -// mod conv_transpose_pads; -// mod conv_transpose_group_2; -// mod conv_transpose_group_2_image_3; -// mod depth_to_space_fp16x16; -// mod depth_to_space_fp8x23; -// mod depth_to_space_i32; -// mod depth_to_space_i8; -// mod depth_to_space_u32; -// mod space_to_depth_fp16x16; -// mod space_to_depth_fp8x23; -// mod space_to_depth_i32; -// mod space_to_depth_i8; -// mod space_to_depth_u32; -// mod scatter_nd_fp16x16_3d_default; -// mod scatter_nd_fp16x16_3d_add; -// mod scatter_nd_fp16x16_3d_mul; -// mod scatter_nd_fp16x16_3d_max; -// mod scatter_nd_fp16x16_3d_min; -// mod scatter_nd_fp8x23_3d_default; -// mod scatter_nd_fp8x23_3d_add; -// mod scatter_nd_fp8x23_3d_mul; -// mod scatter_nd_fp8x23_3d_max; -// mod scatter_nd_fp8x23_3d_min; -// mod scatter_nd_u32_default; -// mod scatter_nd_u32_add; -// mod scatter_nd_u32_mul; -// mod scatter_nd_u32_max; -// mod scatter_nd_u32_min; -// mod conv_2D_with_padding; -// mod conv_1D_no_padding; -// mod conv_1D_with_padding; -// mod conv_3D_no_padding; -// mod conv_3D_with_padding; -// mod conv_4D_no_padding; -// mod conv_2D_with_2_groups; -// mod conv_2D_with_autopad_same; -// mod conv_2D_with_strides_asymmetric_padding; -// mod conv_2D_with_strides_with_padding; -// mod conv_4D_with_padding; +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; +mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod 
argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod 
equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod reduce_sum_fp16x16_1D; +mod reduce_sum_fp16x16_2D_default; +mod reduce_sum_fp16x16_2D_keepdims; +mod reduce_sum_fp16x16_2D_axis_1; +mod reduce_sum_fp8x23_1D; +mod reduce_sum_fp8x23_2D_default; +mod reduce_sum_fp8x23_2D_keepdims; +mod reduce_sum_fp8x23_2D_axis_1; +mod reduce_sum_i32_1D; +mod reduce_sum_i32_2D_default; +mod reduce_sum_i32_2D_keepdims; +mod reduce_sum_i32_2D_axis_1; +mod reduce_sum_i8_1D; +mod reduce_sum_i8_2D_default; +mod reduce_sum_i8_2D_keepdims; +mod reduce_sum_i8_2D_axis_1; +mod reduce_sum_u32_1D; +mod reduce_sum_u32_2D_default; +mod reduce_sum_u32_2D_keepdims; +mod reduce_sum_u32_2D_axis_1; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; 
+mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; 
+mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod 
reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod 
reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod 
unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod 
layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod 
conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding;

From 3690939e5c79fbddf6a2eabb89a3e8fd707c5aa9 Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Thu, 22 Feb 2024 10:41:45 +0100
Subject: [PATCH 27/40] Revert "refactor unsqueeze to use axes of Span<i32>"

This reverts commit b3348955c285b86bc2128e71951b9e5cf73e3cd7.
---
 src/operators/tensor/core.cairo               |   12 +-
 .../tensor/implementations/tensor_bool.cairo  |    2 +-
 .../implementations/tensor_complex64.cairo    |    2 +-
 .../implementations/tensor_fp16x16.cairo      |    2 +-
 .../implementations/tensor_fp16x16wide.cairo  |    2 +-
 .../implementations/tensor_fp32x32.cairo      |    2 +-
 .../implementations/tensor_fp64x64.cairo      |    2 +-
 .../implementations/tensor_fp8x23.cairo       |    2 +-
 .../implementations/tensor_fp8x23wide.cairo   |    2 +-
 .../tensor/implementations/tensor_i32.cairo   |    2 +-
 .../tensor/implementations/tensor_i8.cairo    |    2 +-
 .../tensor/implementations/tensor_u32.cairo   |    2 +-
 tests/nodes.cairo                             | 2082 ++++++++---------
 13 files changed, 1058 insertions(+), 1058 deletions(-)

diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo
index bc93491b8..9342bf328 100644
--- a/src/operators/tensor/core.cairo
+++ b/src/operators/tensor/core.cairo
@@ -3262,7 +3262,7 @@ trait TensorTrait<T> {
     ///    [7]]]]
     /// ```
     ///
-    fn unsqueeze(self: @Tensor<T>, axes: Span<i32>) -> Tensor<T>;
+    fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
     /// # tensor.squeeze
     ///
     /// ```rust
@@ -6105,13 +6105,13 @@ fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T> {
     return Tensor::<T> { shape: target_shape, data: *self.data };
 }
 /// Cf: TensorTrait::unsqueeze docstring
-fn unsqueeze<T>(self: @Tensor<T>, axes: Span<i32>) -> Tensor<T> {
+fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
     let dedupped_array = axes.dedup();
     assert(dedupped_array.len() == axes.len(), 'Duplicated input axes');
 
     let mut self_shape_copy = *self.shape;
-    let mut i = 0;
-    let mut added_axes_count = 0;
+    let mut i: usize = 0;
+    let mut added_axes_count: usize = 0;
     let mut output_shape: Array<usize> = ArrayTrait::new();
     loop {
         if axes.contains(i + added_axes_count) {
@@ -6128,9 +6128,9 @@ fn unsqueeze<T>(self: @Tensor<T>, axes: Span<i32>) -> Tensor<T> {
         };
     };
 
-    let mut j = output_shape.len();
+    let mut j: usize = output_shape.len();
     loop {
-        if axes.contains(j.into()) {
+        if axes.contains(j) {
             output_shape.append(1);
         } else {
             break ();
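As a reading aid for the hunks above (not part of the patch set itself): a minimal Python/numpy sketch of the shape rule the Cairo loops implement once unsigned axes are restored. The helper name `unsqueeze_shape` is hypothetical; the semantics shown — each value in `axes` indexes a position in the *output* shape that receives a new size-1 dimension, duplicates rejected — are taken directly from the diff.

import numpy as np

def unsqueeze_shape(shape, axes):
    # Hypothetical helper mirroring core_tensor::unsqueeze's shape logic:
    # positions listed in `axes` get a new dimension of size 1; all other
    # output positions consume the input dimensions in order.
    assert len(set(axes)) == len(axes), 'Duplicated input axes'
    out_rank = len(shape) + len(axes)
    dims = iter(shape)
    return [1 if i in axes else next(dims) for i in range(out_rank)]

# The [3, 2, 2] shape used by the nodegen tests, unsqueezed at axes 0 and 3:
print(unsqueeze_shape([3, 2, 2], [0, 3]))                  # [1, 3, 2, 1, 2]
print(np.expand_dims(np.zeros((3, 2, 2)), (0, 3)).shape)   # (1, 3, 2, 1, 2)

The duplicate-axes assert matches the 'Duplicated input axes' check retained by the revert, and numpy's expand_dims agrees with the sketch on the same inputs.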
diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo
index cc17b93f2..e75405743 100644
--- a/src/operators/tensor/implementations/tensor_bool.cairo
+++ b/src/operators/tensor/implementations/tensor_bool.cairo
@@ -244,7 +244,7 @@ impl BoolTensor of TensorTrait<bool> {
         panic(array!['not supported!'])
     }
 
-    fn unsqueeze(self: @Tensor<bool>, axes: Span<i32>) -> Tensor<bool> {
+    fn unsqueeze(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
         panic(array!['not supported!'])
     }
 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo
index cbd038244..84b3edb21 100644
--- a/src/operators/tensor/implementations/tensor_complex64.cairo
+++ b/src/operators/tensor/implementations/tensor_complex64.cairo
@@ -326,7 +326,7 @@ impl Complex64Tensor of TensorTrait<complex64> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<complex64>, axes: Span<i32>) -> Tensor<complex64> {
+    fn unsqueeze(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo
index 243c141c2..05dd23ea2 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -364,7 +364,7 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<i32>) -> Tensor<FP16x16> {
+    fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
index 0b5c01c3e..54aadbd3e 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
@@ -324,7 +324,7 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<i32>) -> Tensor<FP16x16W> {
+    fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo
index e3fccc502..7402cd761 100644
--- a/src/operators/tensor/implementations/tensor_fp32x32.cairo
+++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -364,7 +364,7 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<i32>) -> Tensor<FP32x32> {
+    fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo
index b86413786..4477b0025 100644
--- a/src/operators/tensor/implementations/tensor_fp64x64.cairo
+++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -364,7 +364,7 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<i32>) -> Tensor<FP64x64> {
+    fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo
index 9450d9b86..c16b7feed 100644
--- a/src/operators/tensor/implementations/tensor_fp8x23.cairo
+++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo
@@ -365,7 +365,7 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
         core_ops::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP8x23>, axes: Span<i32>) -> Tensor<FP8x23> {
+    fn unsqueeze(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
         core_ops::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
index 29387acbb..0e5ecc074 100644
--- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
+++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
@@ -315,7 +315,7 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<FP8x23W>, axes: Span<i32>) -> Tensor<FP8x23W> {
+    fn unsqueeze(self: @Tensor<FP8x23W>, axes: Span<usize>) -> Tensor<FP8x23W> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo
index 37a529a59..5e637d4ff 100644
--- a/src/operators/tensor/implementations/tensor_i32.cairo
+++ b/src/operators/tensor/implementations/tensor_i32.cairo
@@ -357,7 +357,7 @@ impl I32Tensor of TensorTrait<i32> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<i32>, axes: Span<i32>) -> Tensor<i32> {
+    fn unsqueeze(self: @Tensor<i32>, axes: Span<usize>) -> Tensor<i32> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo
index 328dfe78a..a5f9476c1 100644
--- a/src/operators/tensor/implementations/tensor_i8.cairo
+++ b/src/operators/tensor/implementations/tensor_i8.cairo
@@ -361,7 +361,7 @@ impl I8Tensor of TensorTrait<i8> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<i8>, axes: Span<i32>) -> Tensor<i8> {
+    fn unsqueeze(self: @Tensor<i8>, axes: Span<usize>) -> Tensor<i8> {
         core_tensor::unsqueeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo
index 65d54db63..00ab75b1d 100644
--- a/src/operators/tensor/implementations/tensor_u32.cairo
+++ b/src/operators/tensor/implementations/tensor_u32.cairo
@@ -304,7 +304,7 @@ impl U32Tensor of TensorTrait<u32> {
         core_tensor::squeeze(self, axes)
     }
 
-    fn unsqueeze(self: @Tensor<u32>, axes: Span<i32>) -> Tensor<u32> {
+    fn unsqueeze(self: @Tensor<u32>, axes: Span<usize>) -> Tensor<u32> {
         core_tensor::unsqueeze(self, axes)
    }
 
diff --git a/tests/nodes.cairo b/tests/nodes.cairo
index 8814cfb80..337715889 100644
--- a/tests/nodes.cairo
+++ b/tests/nodes.cairo
@@ -1,1041 +1,1041 @@
-mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false;
-mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod 
equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; -mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod 
less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod 
max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod 
reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod 
reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod 
unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod 
layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod 
scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod 
argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; 
+// mod less_equal_i32_broadcast;
+// mod less_equal_i8;
+// mod less_equal_i8_broadcast;
+// mod less_equal_u32;
+// mod less_equal_u32_broadcast;
+// mod greater_fp16x16;
+// mod greater_fp16x16_broadcast;
+// mod greater_fp8x23;
+// mod greater_fp8x23_broadcast;
+// mod greater_i32;
+// mod greater_i32_broadcast;
+// mod greater_i8;
+// mod greater_i8_broadcast;
+// mod greater_u32;
+// mod greater_u32_broadcast;
+// mod leaky_relu_fp16x16;
+// mod leaky_relu_fp8x23;
+// mod linear_fp16x16;
+// mod linear_fp8x23;
+// mod linear_i32;
+// mod linear_i8;
+// mod linear_u32;
+// mod log_fp16x16;
+// mod log_fp8x23;
+// mod logsoftmax_fp16x16_axis_0;
+// mod logsoftmax_fp16x16_axis_1;
+// mod logsoftmax_fp8x23_axis_0;
+// mod logsoftmax_fp8x23_axis_1;
+// mod matmul_fp16x16_1d;
+// mod matmul_fp16x16_2x2;
+// mod matmul_fp16x16_2x1;
+// mod matmul_fp16x16_1x2;
+// mod matmul_fp8x23_1d;
+// mod matmul_fp8x23_2x2;
+// mod matmul_fp8x23_2x1;
+// mod matmul_fp8x23_1x2;
+// mod matmul_i32_1d;
+// mod matmul_i32_2x2;
+// mod matmul_i32_2x1;
+// mod matmul_i32_1x2;
+// mod matmul_i8_1d;
+// mod matmul_i8_2x2;
+// mod matmul_i8_2x1;
+// mod matmul_i8_1x2;
+// mod matmul_u32_1d;
+// mod matmul_u32_2x2;
+// mod matmul_u32_2x1;
+// mod matmul_u32_1x2;
+// mod mul_fp16x16;
+// mod mul_fp16x16_broadcast;
+// mod mul_fp8x23;
+// mod mul_fp8x23_broadcast;
+// mod mul_i32;
+// mod mul_i32_broadcast;
+// mod mul_i8;
+// mod mul_i8_broadcast;
+// mod mul_u32;
+// mod mul_u32_broadcast;
+// mod or_fp16x16;
+// mod or_fp16x16_broadcast;
+// mod or_fp8x23;
+// mod or_fp8x23_broadcast;
+// mod or_i32;
+// mod or_i32_broadcast;
+// mod or_i8;
+// mod or_i8_broadcast;
+// mod or_u32;
+// mod or_u32_broadcast;
+// mod reduce_sum_fp16x16_1D;
+// mod reduce_sum_fp16x16_2D_default;
+// mod reduce_sum_fp16x16_2D_keepdims;
+// mod reduce_sum_fp16x16_2D_axis_1;
+// mod reduce_sum_fp8x23_1D;
+// mod reduce_sum_fp8x23_2D_default;
+// mod reduce_sum_fp8x23_2D_keepdims;
+// mod reduce_sum_fp8x23_2D_axis_1;
+// mod reduce_sum_i32_1D;
+// mod reduce_sum_i32_2D_default;
+// mod reduce_sum_i32_2D_keepdims;
+// mod reduce_sum_i32_2D_axis_1;
+// mod reduce_sum_i8_1D;
+// mod reduce_sum_i8_2D_default;
+// mod reduce_sum_i8_2D_keepdims;
+// mod reduce_sum_i8_2D_axis_1;
+// mod reduce_sum_u32_1D;
+// mod reduce_sum_u32_2D_default;
+// mod reduce_sum_u32_2D_keepdims;
+// mod reduce_sum_u32_2D_axis_1;
+// mod relu_fp16x16;
+// mod relu_fp8x23;
+// mod relu_i32;
+// mod relu_i8;
+// mod sigmoid_fp16x16;
+// mod sigmoid_fp8x23;
+// mod sin_fp16x16;
+// mod sin_fp8x23;
+// mod sinh_fp16x16;
+// mod sinh_fp8x23;
+// mod softmax_fp16x16;
+// mod softmax_fp8x23;
+// mod softplus_fp8x23;
+// mod softplus_fp16x16;
+// mod softsign_fp8x23;
+// mod softsign_fp16x16;
+// mod sqrt_fp16x16;
+// mod sqrt_fp8x23;
+// mod sub_fp16x16;
+// mod sub_fp16x16_broadcast;
+// mod sub_fp8x23;
+// mod sub_fp8x23_broadcast;
+// mod sub_i32;
+// mod sub_i32_broadcast;
+// mod sub_i8;
+// mod sub_i8_broadcast;
+// mod sub_u32;
+// mod sub_u32_broadcast;
+// mod tanh_fp16x16;
+// mod tanh_fp8x23;
+// mod transpose_fp16x16_2d;
+// mod transpose_fp16x16_3d;
+// mod transpose_fp8x23_2d;
+// mod transpose_fp8x23_3d;
+// mod transpose_i32_2d;
+// mod transpose_i32_3d;
+// mod transpose_i8_2d;
+// mod transpose_i8_3d;
+// mod transpose_u32_2d;
+// mod transpose_u32_3d;
+// mod xor_fp16x16;
+// mod xor_fp16x16_broadcast;
+// mod xor_fp8x23;
+// mod xor_fp8x23_broadcast;
+// mod xor_i32;
+// mod xor_i32_broadcast;
+// mod xor_i8;
+// mod xor_i8_broadcast;
+// mod xor_u32;
+// mod xor_u32_broadcast;
+// mod less_fp16x16;
+// mod less_fp16x16_broadcast;
+// mod less_fp8x23;
+// mod less_fp8x23_broadcast;
+// mod less_i32;
+// mod less_i32_broadcast;
+// mod less_i8;
+// mod less_i8_broadcast;
+// mod less_u32;
+// mod less_u32_broadcast;
+// mod greater_equal_fp16x16;
+// mod greater_equal_fp16x16_broadcast;
+// mod greater_equal_fp8x23;
+// mod greater_equal_fp8x23_broadcast;
+// mod greater_equal_i32;
+// mod greater_equal_i32_broadcast;
+// mod greater_equal_i8;
+// mod greater_equal_i8_broadcast;
+// mod greater_equal_u32;
+// mod greater_equal_u32_broadcast;
+// mod slice_fp16x16_2d;
+// mod slice_fp16x16_3d;
+// mod slice_fp8x23_2d;
+// mod slice_fp8x23_3d;
+// mod slice_i32_2d;
+// mod slice_i32_3d;
+// mod slice_i8_2d;
+// mod slice_i8_3d;
+// mod slice_u32_2d;
+// mod slice_u32_3d;
+// mod gather_fp8x23_3d_default;
+// mod gather_fp8x23_3d_axis1;
+// mod gather_fp8x23_3d_axis2;
+// mod gather_fp16x16_3d_default;
+// mod gather_fp16x16_3d_axis1;
+// mod gather_fp16x16_3d_axis2;
+// mod gather_i8_3d_default;
+// mod gather_i8_3d_axis1;
+// mod gather_i8_3d_axis2;
+// mod gather_i32_3d_default;
+// mod gather_i32_3d_axis1;
+// mod gather_i32_3d_axis2;
+// mod gather_u32_3d_default;
+// mod gather_u32_3d_axis1;
+// mod gather_u32_3d_axis2;
+// mod nonzero_fp16x16_2d;
+// mod nonzero_fp16x16_3d;
+// mod nonzero_fp8x23_2d;
+// mod nonzero_fp8x23_3d;
+// mod nonzero_i32_2d;
+// mod nonzero_i32_3d;
+// mod nonzero_i8_2d;
+// mod nonzero_i8_3d;
+// mod nonzero_u32_2d;
+// mod nonzero_u32_3d;
+// mod squeeze_fP16x16;
+// mod squeeze_fP8x23;
+// mod squeeze_i32;
+// mod squeeze_i8;
+// mod squeeze_u32;
+// mod unsqueeze_fp16x16_2d;
+// mod unsqueeze_fp16x16_3d;
+// mod unsqueeze_fp8x23_2d;
+// mod unsqueeze_fp8x23_3d;
+// mod unsqueeze_i32_2d;
+// mod unsqueeze_i32_3d;
+// mod unsqueeze_i8_2d;
+// mod unsqueeze_i8_3d;
+// mod unsqueeze_u32_2d;
+// mod unsqueeze_u32_3d;
+// mod sign_fP16x16;
+// mod sign_fP8x23;
+// mod sign_fail;
+// mod sign_i32;
+// mod sign_i8;
+// mod clip_fp16x16_2d;
+// mod clip_fp16x16_3d;
+// mod clip_fp8x23_2d;
+// mod clip_fp8x23_3d;
+// mod clip_i32_2d;
+// mod clip_i32_3d;
+// mod clip_i8_2d;
+// mod clip_i8_3d;
+// mod clip_u32_2d;
+// mod clip_u32_3d;
+// mod identity_fP16x16;
+// mod identity_fP8x23;
+// mod identity_i32;
+// mod identity_i8;
+// mod identity_u32;
+// mod thresholded_relu_fp16x16;
+// mod thresholded_relu_fp8x23;
+// mod hard_sigmoid_fp8x23;
+// mod hard_sigmoid_fp16x16;
+// mod neg_fp16x16;
+// mod neg_fp8x23;
+// mod neg_i32;
+// mod neg_i8;
+// mod gemm_all_attributes;
+// mod gemm_alpha;
+// mod gemm_beta;
+// mod gemm_default_matrix_bias;
+// mod gemm_default_vector_bias;
+// mod gemm_default_no_bias;
+// mod gemm_transposeA;
+// mod gemm_transposeB;
+// mod min_fp16x16_three_tensors;
+// mod min_fp16x16_broadcast_three_tensors;
+// mod min_fp16x16_two_tensors;
+// mod min_fp16x16_broadcast_two_tensors;
+// mod min_fp8x23_three_tensors;
+// mod min_fp8x23_broadcast_three_tensors;
+// mod min_fp8x23_two_tensors;
+// mod min_fp8x23_broadcast_two_tensors;
+// mod min_i32_three_tensors;
+// mod min_i32_broadcast_three_tensors;
+// mod min_i32_two_tensors;
+// mod min_i32_broadcast_two_tensors;
+// mod min_i8_three_tensors;
+// mod min_i8_broadcast_three_tensors;
+// mod min_i8_two_tensors;
+// mod min_i8_broadcast_two_tensors;
+// mod min_u32_three_tensors;
+// mod min_u32_broadcast_three_tensors;
+// mod min_u32_two_tensors;
+// mod min_u32_broadcast_two_tensors;
+// mod where_fp16x16;
+// mod where_fp16x16_broadcast;
+// mod where_fp8x23;
+// mod where_fp8x23_broadcast;
+// mod where_i32;
+// mod where_i32_broadcast;
+// mod where_i8;
+// mod where_i8_broadcast;
+// mod where_u32;
+// mod where_u32_broadcast;
+// mod not_bool;
+// mod round_fp16x16;
+// mod round_fp8x23;
+// mod max_fp16x16_three_tensors;
+// mod max_fp16x16_broadcast_three_tensors;
+// mod max_fp16x16_two_tensors;
+// mod max_fp16x16_broadcast_two_tensors;
+// mod max_fp8x23_three_tensors;
+// mod max_fp8x23_broadcast_three_tensors;
+// mod max_fp8x23_two_tensors;
+// mod max_fp8x23_broadcast_two_tensors;
+// mod max_i32_three_tensors;
+// mod max_i32_broadcast_three_tensors;
+// mod max_i32_two_tensors;
+// mod max_i32_broadcast_two_tensors;
+// mod max_i8_three_tensors;
+// mod max_i8_broadcast_three_tensors;
+// mod max_i8_two_tensors;
+// mod max_i8_broadcast_two_tensors;
+// mod max_u32_three_tensors;
+// mod max_u32_broadcast_three_tensors;
+// mod max_u32_two_tensors;
+// mod max_u32_broadcast_two_tensors;
+// mod scatter_fp16x16_3d_default;
+// mod scatter_fp16x16_3d_axis1;
+// mod scatter_fp16x16_3d_axis1_add;
+// mod scatter_fp8x23_default;
+// mod scatter_fp8x23_axis1;
+// mod scatter_fp8x23_mul;
+// mod scatter_i8_default;
+// mod scatter_i8_axis1;
+// mod scatter_i8_axis1_max;
+// mod scatter_u32_default;
+// mod scatter_u32_axis1;
+// mod scatter_u32_add;
+// mod array_feature_extractor_1D_i32;
+// mod array_feature_extractor_1D_fp8x23;
+// mod array_feature_extractor_1D_fp16x16;
+// mod array_feature_extractor_2D_i32;
+// mod array_feature_extractor_2D_fp8x23;
+// mod array_feature_extractor_2D_fp16x16;
+// mod array_feature_extractor_3D_i32;
+// mod array_feature_extractor_3D_fp8x23;
+// mod array_feature_extractor_3D_fp16x16;
+// mod binarizer_fp16x16;
+// mod binarizer_fp8x23;
+// mod tril_fp16x16;
+// mod tril_fp16x16_neg;
+// mod tril_fp16x16_one_row;
+// mod tril_fp16x16_out_neg;
+// mod tril_fp16x16_out_pos;
+// mod tril_fp16x16_pos;
+// mod tril_fp16x16_square;
+// mod tril_fp16x16_square_neg;
+// mod tril_fp16x16_zero;
+// mod triu_fp16x16;
+// mod triu_fp16x16_neg;
+// mod triu_fp16x16_one_row;
+// mod triu_fp16x16_out_neg;
+// mod triu_fp16x16_out_pos;
+// mod triu_fp16x16_pos;
+// mod triu_fp16x16_square;
+// mod triu_fp16x16_square_neg;
+// mod triu_fp16x16_zero;
+// mod tril_fp8x23;
+// mod tril_fp8x23_neg;
+// mod tril_fp8x23_one_row;
+// mod tril_fp8x23_out_neg;
+// mod tril_fp8x23_out_pos;
+// mod tril_fp8x23_pos;
+// mod tril_fp8x23_square;
+// mod tril_fp8x23_square_neg;
+// mod tril_fp8x23_zero;
+// mod triu_fp8x23;
+// mod triu_fp8x23_neg;
+// mod triu_fp8x23_one_row;
+// mod triu_fp8x23_out_neg;
+// mod triu_fp8x23_out_pos;
+// mod triu_fp8x23_pos;
+// mod triu_fp8x23_square;
+// mod triu_fp8x23_square_neg;
+// mod triu_fp8x23_zero;
+// mod tril_i32;
+// mod tril_neg_i32;
+// mod tril_i32_one_row;
+// mod tril_i32_out_neg;
+// mod tril_i32_out_pos;
+// mod tril_i32_pos;
+// mod tril_i32_square;
+// mod tril_i32_square_neg;
+// mod tril_i32_zero;
+// mod triu_i32;
+// mod triu_i32_neg;
+// mod triu_i32_one_row;
+// mod triu_i32_out_neg;
+// mod triu_i32_out_pos;
+// mod triu_i32_pos;
+// mod triu_i32_square;
+// mod triu_i32_square_neg;
+// mod triu_i32_zero;
+// mod tril_i8;
+// mod tril_i8_neg;
+// mod tril_i8_one_row;
+// mod tril_i8_out_neg;
+// mod tril_i8_out_pos;
+// mod tril_i8_pos;
+// mod tril_i8_square;
+// mod tril_i8_square_neg;
+// mod tril_i8_zero;
+// mod triu_i8;
+// mod triu_i8_neg;
+// mod triu_i8_one_row;
+// mod triu_i8_out_neg;
+// mod triu_i8_out_pos;
+// mod triu_i8_pos;
+// mod triu_i8_square;
+// mod triu_i8_square_neg;
+// mod triu_i8_zero;
+// mod tril_u32;
+// mod tril_u32_neg;
+// mod tril_u32_one_row;
+// mod tril_u32_out_neg;
+// mod tril_u32_out_pos;
+// mod tril_u32_pos;
+// mod tril_u32_square;
+// mod tril_u32_square_neg;
+// mod tril_u32_zero;
+// mod triu_u32;
+// mod triu_u32_neg;
+// mod triu_u32_one_row;
+// mod triu_u32_out_neg;
+// mod triu_u32_out_pos;
+// mod triu_u32_pos;
+// mod triu_u32_square;
+// mod triu_u32_square_neg;
+// mod triu_u32_zero;
+// mod reduce_sum_square_fp16x16_export_do_not_keepdims;
+// mod reduce_sum_square_fp16x16_export_keepdims;
+// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims;
+// mod reduce_sum_square_fp8x23_export_do_not_keepdims;
+// mod reduce_sum_square_fp8x23_export_keepdims;
+// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims;
+// mod reduce_sum_square_i32_export_do_not_keepdims;
+// mod reduce_sum_square_i32_export_keepdims;
+// mod reduce_sum_square_i32_export_negative_axes_keepdims;
+// mod reduce_sum_square_i8_export_do_not_keepdims;
+// mod reduce_sum_square_i8_export_keepdims;
+// mod reduce_sum_square_i8_export_negative_axes_keepdims;
+// mod reduce_sum_square_u32_export_do_not_keepdims;
+// mod reduce_sum_square_u32_export_keepdims;
+// mod reduce_sum_square_u32_export_negative_axes_keepdims;
+// mod reduce_l2_fp16x16_export_do_not_keepdims;
+// mod reduce_l2_fp16x16_export_keepdims;
+// mod reduce_l2_fp16x16_export_negative_axes_keepdims;
+// mod reduce_l2_fp8x23_export_do_not_keepdims;
+// mod reduce_l2_fp8x23_export_keepdims;
+// mod reduce_l2_fp8x23_export_negative_axes_keepdims;
+// mod reduce_l1_fp16x16_export_do_not_keepdims;
+// mod reduce_l1_fp16x16_export_keepdims;
+// mod reduce_l1_fp16x16_export_negative_axes_keepdims;
+// mod reduce_l1_fp8x23_export_do_not_keepdims;
+// mod reduce_l1_fp8x23_export_keepdims;
+// mod reduce_l1_fp8x23_export_negative_axes_keepdims;
+// mod reduce_l1_i32_export_do_not_keepdims;
+// mod reduce_l1_i32_export_keepdims;
+// mod reduce_l1_i32_export_negative_axes_keepdims;
+// mod reduce_l1_i8_export_do_not_keepdims;
+// mod reduce_l1_i8_export_keepdims;
+// mod reduce_l1_i8_export_negative_axes_keepdims;
+// mod reduce_l1_u32_export_do_not_keepdims;
+// mod reduce_l1_u32_export_keepdims;
+// mod reduce_l1_u32_export_negative_axes_keepdims;
+// mod reduce_prod_fp16x16_1D;
+// mod reduce_prod_fp16x16_2D_default;
+// mod reduce_prod_fp16x16_2D_keepdims;
+// mod reduce_prod_fp16x16_2D_axis_1;
+// mod reduce_prod_fp8x23_1D;
+// mod reduce_prod_fp8x23_2D_default;
+// mod reduce_prod_fp8x23_2D_keepdims;
+// mod reduce_prod_fp8x23_2D_axis_1;
+// mod reduce_prod_i32_1D;
+// mod reduce_prod_i32_2D_default;
+// mod reduce_prod_i32_2D_keepdims;
+// mod reduce_prod_i32_2D_axis_1;
+// mod reduce_prod_i8_1D;
+// mod reduce_prod_i8_2D_default;
+// mod reduce_prod_i8_2D_keepdims;
+// mod reduce_prod_i8_2D_axis_1;
+// mod reduce_prod_u32_1D;
+// mod reduce_prod_u32_2D_default;
+// mod reduce_prod_u32_2D_keepdims;
+// mod reduce_prod_u32_2D_axis_1;
+// mod gather_elements_fp16x16_3d_default;
+// mod gather_elements_fp16x16_3d_axis1;
+// mod gather_elements_fp16x16_3d_axis2;
+// mod gather_elements_fp8x23_3d_default;
+// mod gather_elements_fp8x23_3d_axis1;
+// mod gather_elements_fp8x23_3d_axis2;
+// mod gather_elements_i8_3d_default;
+// mod gather_elements_i8_3d_axis1;
+// mod gather_elements_i32_3d_default;
+// mod gather_elements_i32_3d_axis1;
+// mod gather_elements_i32_3d_axis2;
+// mod gather_elements_u32_default;
+// mod gather_elements_u32_axis1;
+// mod gather_elements_u32_axis2;
+// mod gather_elements_u32_axis3;
+// mod sequence_length_fp16x16;
+// mod sequence_length_fp16x16_broadcast;
+// mod sequence_length_fp8x23;
+// mod sequence_length_fp8x23_broadcast;
+// mod sequence_length_i32;
+// mod sequence_length_i32_broadcast;
+// mod sequence_length_i8;
+// mod sequence_length_i8_broadcast;
+// mod sequence_length_u32;
+// mod sequence_length_u32_broadcast;
+// mod sequence_at_u32_positive;
+// mod sequence_at_u32_negative;
+// mod sequence_at_fp16x16_positive;
+// mod sequence_at_fp16x16_negative;
+// mod sequence_at_fp8x23_positive;
+// mod sequence_at_fp8x23_negative;
+// mod sequence_at_i32_positive;
+// mod sequence_at_i32_negative;
+// mod sequence_at_i8_positive;
+// mod sequence_at_i8_negative;
+// mod reduce_min_fp16x16_1D;
+// mod reduce_min_fp16x16_2D_default;
+// mod reduce_min_fp16x16_2D_keepdims;
+// mod reduce_min_fp16x16_2D_axis_1;
+// mod reduce_min_fp8x23_1D;
+// mod reduce_min_fp8x23_2D_default;
+// mod reduce_min_fp8x23_2D_keepdims;
+// mod reduce_min_fp8x23_2D_axis_1;
+// mod reduce_min_i32_1D;
+// mod reduce_min_i32_2D_default;
+// mod reduce_min_i32_2D_keepdims;
+// mod reduce_min_i32_2D_axis_1;
+// mod reduce_min_i8_1D;
+// mod reduce_min_i8_2D_default;
+// mod reduce_min_i8_2D_keepdims;
+// mod reduce_min_i8_2D_axis_1;
+// mod reduce_min_u32_1D;
+// mod reduce_min_u32_2D_default;
+// mod reduce_min_u32_2D_keepdims;
+// mod reduce_min_u32_2D_axis_1;
+// mod sequence_construct_fp16x16;
+// mod sequence_construct_fp8x23;
+// mod sequence_construct_i32;
+// mod sequence_construct_i8;
+// mod sequence_construct_u32;
+// mod shrink_hard_fp16x16;
+// mod shrink_soft_fp16x16;
+// mod shrink_hard_fp8x23;
+// mod shrink_soft_fp8x23;
+// mod sequence_empty_fp16x16;
+// mod sequence_empty_fp8x23;
+// mod sequence_empty_i32;
+// mod sequence_empty_i8;
+// mod sequence_empty_u32;
+// mod reduce_mean_fp16x16_1D;
+// mod reduce_mean_fp16x16_2D_default;
+// mod reduce_mean_fp16x16_2D_keepdims;
+// mod reduce_mean_fp16x16_2D_axis_1;
+// mod reduce_mean_fp8x23_1D;
+// mod reduce_mean_fp8x23_2D_default;
+// mod reduce_mean_fp8x23_2D_keepdims;
+// mod reduce_mean_fp8x23_2D_axis_1;
+// mod reduce_mean_i32_1D;
+// mod reduce_mean_i32_2D_default;
+// mod reduce_mean_i32_2D_keepdims;
+// mod reduce_mean_i32_2D_axis_1;
+// mod reduce_mean_i8_1D;
+// mod reduce_mean_i8_2D_default;
+// mod reduce_mean_i8_2D_keepdims;
+// mod reduce_mean_i8_2D_axis_1;
+// mod reduce_mean_u32_1D;
+// mod reduce_mean_u32_2D_default;
+// mod reduce_mean_u32_2D_keepdims;
+// mod reduce_mean_u32_2D_axis_1;
+// mod pow_fp16x16;
+// mod pow_fp16x16_broadcast;
+// mod pow_fp8x23;
+// mod pow_fp8x23_broadcast;
+// mod sequence_erase_u32_positive;
+// mod sequence_erase_u32_negative;
+// mod sequence_erase_u32_empty;
+// mod sequence_erase_fp16x16_positive;
+// mod sequence_erase_fp16x16_negative;
+// mod sequence_erase_fp16x16_empty;
+// mod sequence_erase_fp8x23_positive;
+// mod sequence_erase_fp8x23_negative;
+// mod sequence_erase_fp8x23_empty;
+// mod sequence_erase_i32_positive;
+// mod sequence_erase_i32_negative;
+// mod sequence_erase_i32_empty;
+// mod sequence_erase_i8_positive;
+// mod sequence_erase_i8_negative;
+// mod sequence_erase_i8_empty;
+// mod sequence_insert_fp16x16;
+// mod sequence_insert_fp8x23;
+// mod sequence_insert_i32;
+// mod sequence_insert_i8;
+// mod sequence_insert_u32;
+// mod concat_from_sequence_fp8x23_new_axis_zero;
+// mod concat_from_sequence_fp8x23_new_axis_one;
+// mod concat_from_sequence_fp8x23_new_axis_default;
+// mod concat_from_sequence_fp16x16_new_axis_zero;
+// mod concat_from_sequence_fp16x16_new_axis_one;
+// mod concat_from_sequence_fp16x16_new_axis_default;
+// mod concat_from_sequence_i32_new_axis_zero;
+// mod concat_from_sequence_i32_new_axis_one;
+// mod concat_from_sequence_i32_new_axis_default;
+// mod concat_from_sequence_i8_new_axis_zero;
+// mod concat_from_sequence_i8_new_axis_one;
+// mod concat_from_sequence_i8_new_axis_default;
+// mod concat_from_sequence_u32_new_axis_zero;
+// mod concat_from_sequence_u32_new_axis_one;
+// mod concat_from_sequence_u32_new_axis_default;
+// mod is_nan_fp16x16;
+// mod is_nan_fp8x23;
+// mod is_inf_fp16x16;
+// mod is_inf_fp8x23;
+// mod is_inf_i32;
+// mod is_inf_i8;
+// mod is_inf_u32;
+// mod is_pos_inf_fp16x16;
+// mod is_neg_inf_fp16x16;
+// mod is_pos_inf_fp8x23;
+// mod is_neg_inf_fp8x23;
+// mod is_pos_inf_i32;
+// mod is_neg_inf_i32;
+// mod is_pos_inf_i8;
+// mod is_neg_inf_i8;
+// mod reduce_log_sum_fp8x23_export_do_not_keepdims;
+// mod reduce_log_sum_fp8x23_export_keepdims;
+// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims;
+// mod reduce_log_sum_fp16x16_export_do_not_keepdims;
+// mod reduce_log_sum_fp16x16_export_keepdims;
+// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims;
+// mod and_bool;
+// mod erf_fp16x16;
+// mod erf_fp8x23;
+// mod unique_fp16x16_without_axis_sorted;
+// mod unique_fp16x16_with_axis_zero_sorted;
+// mod unique_u32_without_axis_sorted;
+// mod unique_u32_without_axis_not_sorted;
+// mod unique_u32_with_axis_zero_sorted;
+// mod unique_u32_with_axis_zero_not_sorted;
+// mod unique_u32_with_axis_one_sorted;
+// mod unique_u32_with_axis_one_not_sorted;
+// mod gather_nd_fp16x16_3d_default;
+// mod gather_nd_fp16x16_3d_batch_dims1;
+// mod gather_nd_fp16x16_3d_batch_dims2;
+// mod gather_nd_fp8x23_3d_default;
+// mod gather_nd_fp8x23_3d_batch_dims1;
+// mod gather_nd_fp8x23_3d_batch_dims2;
+// mod gather_nd_i32_3d_default;
+// mod gather_nd_i32_3d_batch_dims1;
+// mod gather_nd_i32_3d_batch_dims2;
+// mod gather_nd_i8_3d_default;
+// mod gather_nd_i8_3d_batch_dims1;
+// mod gather_nd_u32_default;
+// mod gather_nd_u32_batch_dims1;
+// mod gather_nd_u32_batch_dims2;
+// mod resize_upsample_scales_nearest;
+// mod resize_downsample_scales_cubic;
+// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside;
+// mod resize_downsample_scales_cubic_align_corners;
+// mod resize_upsample_scales_linear;
+// mod resize_downsample_scales_linear_align_corners;
+// mod resize_downsample_scales_nearest;
+// mod resize_upsample_scales_cubic;
+// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside;
+// mod resize_upsample_scales_cubic_align_corners;
+// mod resize_upsample_scales_cubic_asymmetric;
+// mod resize_upsample_scales_linear_align_corners;
+// mod resize_upsample_sizes_nearest;
+// mod resize_upsample_sizes_cubic;
+// mod resize_downsample_sizes_cubic;
+// mod resize_downsample_sizes_nearest;
+// mod resize_upsample_scales_linear_half_pixel_symmetric;
+// mod resize_downsample_scales_cubic_antialias;
+// mod resize_downsample_scales_linear_antialias;
+// mod resize_downsample_sizes_cubic_antialias;
+// mod resize_downsample_sizes_linear_pytorch_half_pixel;
+// mod resize_tf_crop_and_resize;
+// mod resize_tf_crop_and_resize_extrapolation_value;
+// mod resize_upsample_scales_nearest_axes_2_3;
+// mod resize_upsample_scales_nearest_axes_3_2;
+// mod resize_upsample_sizes_nearest_axes_2_3;
+// mod resize_upsample_sizes_nearest_ceil_half_pixel;
+// mod resize_upsample_sizes_nearest_floor_align_corners;
+// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric;
+// mod resize_downsample_scales_linear_half_pixel_symmetric;
+// mod resize_downsample_sizes_nearest_not_larger;
+// mod resize_downsample_sizes_nearest_not_smaller;
+// mod resize_tf_crop_and_resize_axes_2_3;
+// mod resize_tf_crop_and_resize_axes_3_2;
+// mod resize_upsample_sizes_nearest_axes_3_2;
+// mod resize_upsample_sizes_nearest_not_larger;
+// mod resize_upsample_sizes_nearest_not_smaller;
+// mod compress_fp16x16_3d_default;
+// mod compress_fp16x16_3d_axis1;
+// mod compress_fp16x16_3d_axis2;
+// mod compress_fp16x16_3d_axis3;
+// mod compress_fp16x16_3d_noaxis;
+// mod compress_fp8x23_3d_default;
+// mod compress_fp8x23_3d_axis1;
+// mod compress_fp8x23_3d_axis2;
+// mod compress_i32_3d_default;
+// mod compress_i32_3d_axis1;
+// mod compress_i32_3d_axis2;
+// mod compress_i8_3d_default;
+// mod compress_i8_3d_axis1;
+// mod compress_i8_3d_axis2;
+// mod compress_u32_3d_default;
+// mod compress_u32_3d_axis1;
+// mod compress_u32_3d_axis2;
+// mod compress_u32_3d_axis2_2;
+// mod compress_u32_3d_axis3;
+// mod layer_normalization_default_axis;
+// mod layer_normalization_4d_axis0;
+// mod layer_normalization_4d_axis1;
+// mod layer_normalization_4d_axis2;
+// mod layer_normalization_4d_axis3;
+// mod layer_normalization_3d_axis0_epsilon;
+// mod layer_normalization_3d_axis_negative_3_epsilon;
+// mod layer_normalization_3d_axis1_epsilon;
+// mod layer_normalization_3d_axis2_epsilon;
+// mod layer_normalization_4d_axis_negative_4;
+// mod layer_normalization_4d_axis_negative_3;
+// mod layer_normalization_4d_axis_negative_2;
+// mod layer_normalization_4d_axis_negative_1;
+// mod layer_normalization_3d_axis_negative_2_epsilon;
+// mod layer_normalization_3d_axis_negative_1_epsilon;
+// mod layer_normalization_test;
+// mod split_u32_1d_equal_parts;
+// mod split_u32_2d_equal_parts;
+// mod split_u32_zero_size;
+// mod split_u32_1d_variable_parts;
+// mod split_u32_2d_variable_parts;
+// mod split_u32_1d_uneven;
+// mod split_u32_2d_uneven;
+// mod split_fp16x16_1d_equal_parts;
+// mod split_fp16x16_1d_variable_parts;
+// mod split_fp16x16_2d_equal_parts;
+// mod split_fp16x16_2d_variable_parts;
+// mod split_fp16x16_zero_size;
+// mod split_fp16x16_1d_uneven;
+// mod split_fp16x16_2d_uneven;
+// mod grid_sample;
+// mod grid_sample_cubic;
+// mod grid_sample_aligncorners;
+// mod grid_sample_nearest;
+// mod grid_sample_nearest_aligncorner;
+// mod grid_sample_padding_border;
+// mod grid_sample_padding_reflection;
+// mod grid_sample_padding_zeros;
+// mod col2im;
+// mod col2im_5D;
+// mod col2im_dilations;
+// mod col2im_pads;
+// mod col2im_strides;
+// mod random_uniform_like_fp16x16;
+// mod random_uniform_like_fp8x23;
+// mod range_fp8x23;
+// mod range_fp16x16;
+// mod range_i32;
+// mod range_i8;
+// mod range_u32;
+// mod hann_window_fp8x23;
+// mod hann_window_fp16x16;
+// mod hamming_window_fp16x16;
+// mod hamming_window_fp8x23;
+// mod blackman_window_fp16x16;
+// mod blackman_window_fp8x23;
+// mod split_to_sequence_fp16x16_1d_equal_parts;
+// mod split_to_sequence_fp16x16_1d_variable_parts;
+// mod split_to_sequence_fp16x16_2d_equal_parts;
+// mod split_to_sequence_fp16x16_2d_variable_parts;
+// mod split_to_sequence_fp16x16_zero_size;
+// mod split_to_sequence_fp16x16_1d_uneven;
+// mod split_to_sequence_fp16x16_2d_uneven;
+// mod split_to_sequence_u32_1d_equal_parts;
+// mod split_to_sequence_u32_1d_variable_parts;
+// mod split_to_sequence_u32_2d_equal_parts;
+// mod split_to_sequence_u32_2d_variable_parts;
+// mod split_to_sequence_u32_zero_size;
+// mod split_to_sequence_u32_1d_uneven;
+// mod split_to_sequence_u32_2d_uneven;
+// mod split_to_sequence_2d_scalar;
+// mod split_to_sequence_2d_nokeepdims;
+// mod split_to_sequence_1d_nokeepdims;
+// mod reverse_sequence_fp16x16_batch_equal_parts;
+// mod reverse_sequence_fp16x16_time_equal_parts;
+// mod reverse_sequence_i32_batch_equal_parts;
+// mod reverse_sequence_i32_time_equal_parts;
+// mod reverse_sequence_i8_batch_equal_parts;
+// mod reverse_sequence_i8_time_equal_parts;
+// mod reverse_sequence_u32_4x4_batch;
+// mod reverse_sequence_u32_4x4_time;
+// mod reverse_sequence_u32_3x3_batch;
+// mod reverse_sequence_u32_3x3_time;
+// mod reverse_sequence_different_dimensions_4_5;
+// mod reverse_sequence_different_dimensions_2_4;
+// mod reverse_sequence_different_dimensions_1_6;
+// mod reverse_sequence_different_dimensions_3x9_batch;
+// mod reverse_sequence_different_dimensions_3x9_time;
+// mod conv_transpose;
+// mod conv_transpose_1d;
+// mod conv_transpose_3d;
+// mod conv_transpose_attributes;
+// mod conv_transpose_autopad_same;
+// mod conv_transpose_dilations;
+// mod conv_transpose_pads;
+// mod conv_transpose_group_2;
+// mod conv_transpose_group_2_image_3;
+// mod depth_to_space_fp16x16;
+// mod depth_to_space_fp8x23;
+// mod depth_to_space_i32;
+// mod depth_to_space_i8;
+// mod depth_to_space_u32;
+// mod space_to_depth_fp16x16;
+// mod space_to_depth_fp8x23;
+// mod space_to_depth_i32;
+// mod space_to_depth_i8;
+// mod space_to_depth_u32;
+// mod scatter_nd_fp16x16_3d_default;
+// mod scatter_nd_fp16x16_3d_add;
+// mod scatter_nd_fp16x16_3d_mul;
+// mod scatter_nd_fp16x16_3d_max;
+// mod scatter_nd_fp16x16_3d_min;
+// mod scatter_nd_fp8x23_3d_default;
+// mod scatter_nd_fp8x23_3d_add;
+// mod scatter_nd_fp8x23_3d_mul;
+// mod scatter_nd_fp8x23_3d_max;
+// mod scatter_nd_fp8x23_3d_min;
+// mod scatter_nd_u32_default;
+// mod scatter_nd_u32_add;
+// mod scatter_nd_u32_mul;
+// mod scatter_nd_u32_max;
+// mod scatter_nd_u32_min;
+// mod conv_2D_with_padding;
+// mod conv_1D_no_padding;
+// mod conv_1D_with_padding;
+// mod conv_3D_no_padding;
+// mod conv_3D_with_padding;
+// mod conv_4D_no_padding;
+// mod conv_2D_with_2_groups;
+// mod conv_2D_with_autopad_same;
+// mod conv_2D_with_strides_asymmetric_padding;
+// mod conv_2D_with_strides_with_padding;
+// mod conv_4D_with_padding;

From 7d84a423a1605548fcaf40d112f4e2d8fc5485bb Mon Sep 17 00:00:00 2001
From: raphaelDkhn
Date: Thu, 22 Feb 2024 10:57:29 +0100
Subject: [PATCH 28/40] refactor squeeze to use axes of Option<Span<usize>>

---
 nodegen/node/squeeze.py                       |   14 +-
 src/operators/tensor/core.cairo               |    6 +-
 .../tensor/implementations/tensor_bool.cairo  |    2 +-
 .../implementations/tensor_complex64.cairo    |    2 +-
 .../implementations/tensor_fp16x16.cairo      |    2 +-
 .../implementations/tensor_fp16x16wide.cairo  |    2 +-
 .../implementations/tensor_fp32x32.cairo      |    2 +-
 .../implementations/tensor_fp64x64.cairo      |    2 +-
 .../implementations/tensor_fp8x23.cairo       |    2 +-
 .../implementations/tensor_fp8x23wide.cairo   |    2 +-
 .../tensor/implementations/tensor_i32.cairo   |    2 +-
 .../tensor/implementations/tensor_i8.cairo    |    2 +-
 .../tensor/implementations/tensor_u32.cairo   |    2 +-
 tests/nodes.cairo                             | 2082 ++++++++---------
 tests/nodes/squeeze_fP16x16.cairo             |    2 +-
 tests/nodes/squeeze_fP8x23.cairo              |    2 +-
 tests/nodes/squeeze_i32.cairo                 |    2 +-
 tests/nodes/squeeze_i8.cairo                  |    2 +-
 tests/nodes/squeeze_u32.cairo                 |    2 +-
 19 files changed, 1067 insertions(+), 1067 deletions(-)

diff --git a/nodegen/node/squeeze.py b/nodegen/node/squeeze.py
index 2f598e1ea..44d1d4a22 100644
--- a/nodegen/node/squeeze.py
+++ b/nodegen/node/squeeze.py
@@ -15,11 +15,11 @@ def squeeze():
 
             name = "squeeze_i8"
             make_test(
-                [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
         squeeze()
 
     @staticmethod
-    def squeeze_i32():
+    def squeeze():
         def squeeze():
             x = np.ones((1, 2, 1, 2, 1), dtype=np.int32)
             y = np.ones((2, 2, 1), dtype=np.int32)
@@ -27,9 +27,9 @@ def squeeze():
             x = Tensor(Dtype.I32, x.shape, x.flatten())
             y = Tensor(Dtype.I32, y.shape, y.flatten())
 
-            name = "squeeze_i32"
+            name = "squeeze"
             make_test(
-                [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
         squeeze()
 
     @staticmethod
@@ -43,7 +43,7 @@ def squeeze():
 
             name = "squeeze_u32"
             make_test(
-                [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
         squeeze()
 
     @staticmethod
@@ -59,7 +59,7 @@ def squeeze():
 
             name = "squeeze_fP16x16"
             make_test(
-                [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
         squeeze()
 
     @staticmethod
@@ -75,5 +75,5 @@ def squeeze():
 
             name = "squeeze_fP8x23"
             make_test(
-                [x], y, "input_0.squeeze(Option::Some(array![0_i32, 2_i32].span()))", name)
+                [x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
         squeeze()
diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo
index 9342bf328..b4bc75d3d 100644
--- a/src/operators/tensor/core.cairo
+++ b/src/operators/tensor/core.cairo
@@ -3299,7 +3299,7 @@ trait TensorTrait<T> {
     /// [1 1]]
     /// ```
    ///
-    fn squeeze(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T>;
+    fn squeeze(self: @Tensor<T>, axes: Option<Span<usize>>) -> Tensor<T>;
     /// # tensor.clip
     ///
     /// ```rust
@@ -6041,7 +6041,7 @@ fn nonzero<
 }
 
 /// Cf: TensorTrait::squeeze docstring
-fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T> {
+fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<usize>>) -> Tensor<T> {
     let target_shape = match axes {
         Option::Some(mut axes) => {
             let mut axis_squeezed = 0;
@@ -6050,7 +6050,7 @@ fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T> {
             match axes.pop_front() {
                 Option::Some(axis) => {
                     let mut reshape: Array<usize> = ArrayTrait::new();
-                    let mut index = 0_i32;
+                    let mut index = 0;
                     let axis = if *axis < 0 {
                         assert(
                             *axis <= (*self.shape).len().into(), 'axis out of accepted range'
diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo
index e75405743..a3e7c0a60 100644
--- a/src/operators/tensor/implementations/tensor_bool.cairo
+++ b/src/operators/tensor/implementations/tensor_bool.cairo
@@ -240,7 +240,7 @@ impl BoolTensor of TensorTrait<bool> {
         panic(array!['not supported!'])
     }
 
-    fn squeeze(self: @Tensor<bool>, axes: Option<Span<i32>>) -> Tensor<bool> {
+    fn squeeze(self: @Tensor<bool>, axes: Option<Span<usize>>) -> Tensor<bool> {
         panic(array!['not supported!'])
     }
 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo
index 84b3edb21..51b9326a5 100644
--- a/src/operators/tensor/implementations/tensor_complex64.cairo
+++ b/src/operators/tensor/implementations/tensor_complex64.cairo
@@ -322,7 +322,7 @@ impl Complex64Tensor of TensorTrait<complex64> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<complex64>, axes: Option<Span<i32>>) -> Tensor<complex64> {
+    fn squeeze(self: @Tensor<complex64>, axes: Option<Span<usize>>) -> Tensor<complex64> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo
index 05dd23ea2..2b1c5cd5a 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -360,7 +360,7 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP16x16>, axes: Option<Span<i32>>) -> Tensor<FP16x16> {
+    fn squeeze(self: @Tensor<FP16x16>, axes: Option<Span<usize>>) -> Tensor<FP16x16> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
index 54aadbd3e..fdbdbeee6 100644
--- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
+++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo
@@ -320,7 +320,7 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP16x16W>, axes: Option<Span<i32>>) -> Tensor<FP16x16W> {
+    fn squeeze(self: @Tensor<FP16x16W>, axes: Option<Span<usize>>) -> Tensor<FP16x16W> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo
index 7402cd761..bfdd22073 100644
--- a/src/operators/tensor/implementations/tensor_fp32x32.cairo
+++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -360,7 +360,7 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP32x32>, axes: Option<Span<i32>>) -> Tensor<FP32x32> {
+    fn squeeze(self: @Tensor<FP32x32>, axes: Option<Span<usize>>) -> Tensor<FP32x32> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo
index 4477b0025..ef16fe6d7 100644
--- a/src/operators/tensor/implementations/tensor_fp64x64.cairo
+++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -360,7 +360,7 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP64x64>, axes: Option<Span<i32>>) -> Tensor<FP64x64> {
+    fn squeeze(self: @Tensor<FP64x64>, axes: Option<Span<usize>>) -> Tensor<FP64x64> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo
index c16b7feed..9a495ae7e 100644
--- a/src/operators/tensor/implementations/tensor_fp8x23.cairo
+++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo
@@ -361,7 +361,7 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
         core_ops::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP8x23>, axes: Option<Span<i32>>) -> Tensor<FP8x23> {
+    fn squeeze(self: @Tensor<FP8x23>, axes: Option<Span<usize>>) -> Tensor<FP8x23> {
         core_ops::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
index 0e5ecc074..74c03cc4d 100644
--- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
+++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo
@@ -311,7 +311,7 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<FP8x23W>, axes: Option<Span<i32>>) -> Tensor<FP8x23W> {
+    fn squeeze(self: @Tensor<FP8x23W>, axes: Option<Span<usize>>) -> Tensor<FP8x23W> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo
index 5e637d4ff..825c4b3c6 100644
--- a/src/operators/tensor/implementations/tensor_i32.cairo
+++ b/src/operators/tensor/implementations/tensor_i32.cairo
@@ -353,7 +353,7 @@ impl I32Tensor of TensorTrait<i32> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<i32>, axes: Option<Span<i32>>) -> Tensor<i32> {
+    fn squeeze(self: @Tensor<i32>, axes: Option<Span<usize>>) -> Tensor<i32> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo
index a5f9476c1..d9abf8feb 100644
--- a/src/operators/tensor/implementations/tensor_i8.cairo
+++ b/src/operators/tensor/implementations/tensor_i8.cairo
@@ -357,7 +357,7 @@ impl I8Tensor of TensorTrait<i8> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<i8>, axes: Option<Span<i32>>) -> Tensor<i8> {
+    fn squeeze(self: @Tensor<i8>, axes: Option<Span<usize>>) -> Tensor<i8> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo
index 00ab75b1d..f675d366b 100644
--- a/src/operators/tensor/implementations/tensor_u32.cairo
+++ b/src/operators/tensor/implementations/tensor_u32.cairo
@@ -300,7 +300,7 @@ impl U32Tensor of TensorTrait<u32> {
         core_tensor::nonzero(self)
     }
 
-    fn squeeze(self: @Tensor<u32>, axes: Option<Span<i32>>) -> Tensor<u32> {
+    fn squeeze(self: @Tensor<u32>, axes: Option<Span<usize>>) -> Tensor<u32> {
         core_tensor::squeeze(self, axes)
     }
 
diff --git a/tests/nodes.cairo b/tests/nodes.cairo
index 337715889..8814cfb80 100644
--- a/tests/nodes.cairo
+++ b/tests/nodes.cairo
@@ -1,1041 +1,1041 @@
-// mod abs_fp16x16;
-// mod abs_fp8x23;
-// mod abs_i32;
-// mod abs_i8;
-// mod acos_fp16x16;
-// mod acos_fp8x23;
-// mod acosh_fp16x16;
-// mod acosh_fp8x23;
-// mod add_fp16x16;
-// mod add_fp16x16_broadcast;
-// mod add_fp8x23;
-// mod add_fp8x23_broadcast;
-// mod add_i32;
-// mod add_i32_broadcast;
-// mod add_i8;
-// mod add_i8_broadcast;
-// mod add_u32;
-// mod add_u32_broadcast;
-// mod argmax_fp16x16_1D_default;
-// mod argmax_fp16x16_1D_keepdims_false;
-// mod argmax_fp16x16_1D_last_index;
-// mod argmax_fp16x16_2D_default;
-// mod argmax_fp16x16_2D_keepdims_false;
-// mod argmax_fp16x16_2D_last_index;
-// mod argmax_fp16x16_3D_default;
-// mod argmax_fp16x16_3D_keepdims_false;
-// mod argmax_fp16x16_3D_last_index;
-// mod argmax_fp8x23_1D_default;
-// mod argmax_fp8x23_1D_keepdims_false;
-// mod argmax_fp8x23_1D_last_index;
-// mod argmax_fp8x23_2D_default;
-// mod argmax_fp8x23_2D_keepdims_false;
-// mod argmax_fp8x23_2D_last_index;
-// mod argmax_fp8x23_3D_default;
-// mod argmax_fp8x23_3D_keepdims_false;
-// mod argmax_fp8x23_3D_last_index;
-// mod argmax_i32_1D_default;
-// mod argmax_i32_1D_keepdims_false;
-// mod argmax_i32_1D_last_index;
-// mod argmax_i32_2D_default;
-// mod argmax_i32_2D_keepdims_false;
-// mod argmax_i32_2D_last_index;
-// mod argmax_i32_3D_default;
-// mod argmax_i32_3D_keepdims_false;
-// mod argmax_i32_3D_last_index;
-// mod argmax_i8_1D_default;
-// mod argmax_i8_1D_keepdims_false;
-// mod argmax_i8_1D_last_index;
-// mod argmax_i8_2D_default;
-// mod argmax_i8_2D_keepdims_false;
-// mod argmax_i8_2D_last_index;
-// mod argmax_i8_3D_default;
-// mod argmax_i8_3D_keepdims_false;
-// mod argmax_i8_3D_last_index;
-// mod argmax_u32_1D_default;
-// mod argmax_u32_1D_keepdims_false;
-// mod argmax_u32_1D_last_index;
-// mod argmax_u32_2D_default;
-// mod argmax_u32_2D_keepdims_false;
-// mod argmax_u32_2D_last_index;
-// mod argmax_u32_3D_default;
-// mod argmax_u32_3D_keepdims_false;
-// mod argmax_u32_3D_last_index;
-// mod argmin_fp16x16_1D_default;
-// mod argmin_fp16x16_1D_keepdims_false;
-// mod argmin_fp16x16_1D_last_index;
-// mod argmin_fp16x16_2D_default;
-// mod argmin_fp16x16_2D_keepdims_false;
-// mod argmin_fp16x16_2D_last_index;
-// mod argmin_fp16x16_3D_default;
-// mod argmin_fp16x16_3D_keepdims_false;
-// mod argmin_fp16x16_3D_last_index;
-// mod argmin_fp8x23_1D_default;
-// mod argmin_fp8x23_1D_keepdims_false;
-// mod argmin_fp8x23_1D_last_index;
-// mod argmin_fp8x23_2D_default;
-// mod argmin_fp8x23_2D_keepdims_false;
-// mod argmin_fp8x23_2D_last_index;
-// mod argmin_fp8x23_3D_default;
-// mod argmin_fp8x23_3D_keepdims_false;
-// mod argmin_fp8x23_3D_last_index;
-// mod argmin_i32_1D_default;
-// mod argmin_i32_1D_keepdims_false;
-// mod argmin_i32_1D_last_index;
-// mod argmin_i32_2D_default;
-// mod argmin_i32_2D_keepdims_false;
-// mod argmin_i32_2D_last_index;
-// mod argmin_i32_3D_default;
-// mod argmin_i32_3D_keepdims_false;
-// mod argmin_i32_3D_last_index;
-// mod argmin_i8_1D_default;
-// mod argmin_i8_1D_keepdims_false;
-// mod argmin_i8_1D_last_index;
-// mod argmin_i8_2D_default;
-// mod argmin_i8_2D_keepdims_false;
-// mod argmin_i8_2D_last_index;
-// mod argmin_i8_3D_default;
-// mod argmin_i8_3D_keepdims_false;
-// mod argmin_i8_3D_last_index;
-// mod argmin_u32_1D_default;
-// mod argmin_u32_1D_keepdims_false;
-// mod argmin_u32_1D_last_index;
-// mod argmin_u32_2D_default;
-// mod argmin_u32_2D_keepdims_false;
-// mod argmin_u32_2D_last_index;
-// mod argmin_u32_3D_default;
-// mod argmin_u32_3D_keepdims_false;
-// mod argmin_u32_3D_last_index;
-// mod asin_fp16x16;
-// mod asin_fp8x23;
-// mod asinh_fp16x16;
-// mod asinh_fp8x23;
-// mod atan_fp16x16;
-// mod atan_fp8x23;
-// mod ceil_fp16x16;
-// mod ceil_fp8x23;
-// mod concat_fp16x16_1d;
-// mod concat_fp16x16_2d;
-// mod concat_fp16x16_3d_default;
-// mod concat_fp16x16_3d_axis_1;
-// mod concat_fp16x16_3d_axis_2;
-// mod concat_fp16x16_3d_three_tensors_axis_1;
-// mod concat_fp16x16_3d_three_tensors_axis_2;
-// mod concat_fp8x23_1d;
-// mod concat_fp8x23_2d;
-// mod concat_fp8x23_3d_default;
-// mod concat_fp8x23_3d_axis_1;
-// mod concat_fp8x23_3d_axis_2;
-// mod concat_fp8x23_3d_three_tensors_axis_1;
-// mod concat_fp8x23_3d_three_tensors_axis_2;
-// mod concat_i32_1d;
-// mod concat_i32_2d;
-// mod concat_i32_3d_default;
-// mod concat_i32_3d_axis_1;
-// mod concat_i32_3d_axis_2;
-// mod concat_i32_3d_three_tensors_axis_1;
-// mod concat_i32_3d_three_tensors_axis_2;
-// mod concat_i8_1d;
-// mod concat_i8_2d;
-// mod concat_i8_3d_default;
-// mod concat_i8_3d_axis_1;
-// mod concat_i8_3d_axis_2;
-// mod concat_i8_3d_three_tensors_axis_1;
-// mod concat_i8_3d_three_tensors_axis_2;
-// mod concat_u32_1d;
-// mod concat_u32_2d;
-// mod concat_u32_3d_default;
-// mod concat_u32_3d_axis_1;
-// mod concat_u32_3d_axis_2;
-// mod concat_u32_3d_three_tensors_axis_1;
-// mod concat_u32_3d_three_tensors_axis_2;
-// mod cos_fp16x16;
-// mod cos_fp8x23;
-// mod cosh_fp16x16;
-// mod cosh_fp8x23;
-// mod cumsum_fp16x16_1d_default;
-// mod cumsum_fp16x16_1d_exclusive;
-// mod cumsum_fp16x16_1d_reverse;
-// mod cumsum_fp16x16_1d_reverse_exclusive;
-// mod cumsum_fp16x16_2d_axis_0;
-// mod cumsum_fp16x16_2d_axis_1;
-// mod cumsum_fp8x23_1d_default;
-// mod cumsum_fp8x23_1d_exclusive;
-// mod cumsum_fp8x23_1d_reverse;
-// mod cumsum_fp8x23_1d_reverse_exclusive;
-// mod cumsum_fp8x23_2d_axis_0;
-// mod cumsum_fp8x23_2d_axis_1;
-// mod cumsum_i32_1d_default;
-// mod cumsum_i32_1d_exclusive;
-// mod cumsum_i32_1d_reverse;
-// mod cumsum_i32_1d_reverse_exclusive;
-// mod cumsum_i32_2d_axis_0;
-// mod cumsum_i32_2d_axis_1;
-// mod cumsum_i8_1d_default;
-// mod cumsum_i8_1d_exclusive;
-// mod cumsum_i8_1d_reverse;
-// mod cumsum_i8_1d_reverse_exclusive;
-// mod cumsum_i8_2d_axis_0;
-// mod cumsum_i8_2d_axis_1;
-// mod cumsum_u32_1d_default;
-// mod cumsum_u32_1d_exclusive;
-// mod cumsum_u32_1d_reverse;
-// mod cumsum_u32_1d_reverse_exclusive;
-// mod cumsum_u32_2d_axis_0;
-// mod cumsum_u32_2d_axis_1;
-// mod div_fp16x16;
-// mod div_fp16x16_broadcast;
-// mod div_fp8x23;
-// mod div_fp8x23_broadcast;
-// mod div_i32;
-// mod div_i32_broadcast;
-// mod div_i8;
-// mod div_i8_broadcast;
-// mod div_u32;
-// mod div_u32_broadcast;
-// mod equal_fp16x16;
-// mod equal_fp16x16_broadcast;
-// mod equal_fp8x23;
-// mod equal_fp8x23_broadcast;
-// mod equal_i32;
-// mod equal_i32_broadcast;
-// mod equal_i8;
-// mod equal_i8_broadcast;
-// mod equal_u32;
-// mod equal_u32_broadcast;
-// mod exp_fp16x16;
-// mod exp_fp8x23;
-// mod less_equal_fp16x16;
-// mod less_equal_fp16x16_broadcast;
-// mod less_equal_fp8x23;
-// mod less_equal_fp8x23_broadcast;
-// mod less_equal_i32;
-// mod less_equal_i32_broadcast;
-// mod less_equal_i8;
-// mod less_equal_i8_broadcast;
-// mod less_equal_u32;
-// mod less_equal_u32_broadcast;
-// mod greater_fp16x16;
-// mod greater_fp16x16_broadcast;
-// mod greater_fp8x23;
-// mod greater_fp8x23_broadcast;
-// mod greater_i32;
-// mod greater_i32_broadcast;
-// mod greater_i8;
-// mod greater_i8_broadcast;
-// mod greater_u32;
-// mod greater_u32_broadcast;
-// mod leaky_relu_fp16x16;
-// mod leaky_relu_fp8x23;
-// mod linear_fp16x16;
-// mod linear_fp8x23;
-// mod linear_i32;
-// mod linear_i8;
-// mod linear_u32;
-// mod log_fp16x16;
-// mod log_fp8x23;
-// mod logsoftmax_fp16x16_axis_0;
-// mod logsoftmax_fp16x16_axis_1;
-// mod logsoftmax_fp8x23_axis_0;
-// mod logsoftmax_fp8x23_axis_1;
-// mod matmul_fp16x16_1d;
-// mod matmul_fp16x16_2x2;
-// mod matmul_fp16x16_2x1;
-// mod matmul_fp16x16_1x2;
-// mod matmul_fp8x23_1d;
-// mod matmul_fp8x23_2x2;
-// mod matmul_fp8x23_2x1;
-// mod matmul_fp8x23_1x2;
-// mod matmul_i32_1d;
-// mod matmul_i32_2x2;
-// mod matmul_i32_2x1;
-// mod matmul_i32_1x2;
-// mod matmul_i8_1d;
-// mod matmul_i8_2x2;
-// mod matmul_i8_2x1;
-// mod matmul_i8_1x2;
-// mod matmul_u32_1d;
-// mod matmul_u32_2x2;
-// mod matmul_u32_2x1;
-// mod matmul_u32_1x2;
-// mod mul_fp16x16;
-// mod mul_fp16x16_broadcast;
-// mod mul_fp8x23;
-// mod mul_fp8x23_broadcast;
-// mod mul_i32;
-// mod mul_i32_broadcast;
-// mod mul_i8;
-// mod mul_i8_broadcast;
-// mod mul_u32;
-// mod mul_u32_broadcast;
-// mod or_fp16x16;
-// mod or_fp16x16_broadcast;
-// mod or_fp8x23;
-// mod or_fp8x23_broadcast;
-// mod or_i32;
-// mod or_i32_broadcast;
-// mod or_i8;
-// mod or_i8_broadcast;
-// mod or_u32;
-// mod or_u32_broadcast;
-// mod reduce_sum_fp16x16_1D;
-// mod reduce_sum_fp16x16_2D_default;
-// mod reduce_sum_fp16x16_2D_keepdims;
-// mod reduce_sum_fp16x16_2D_axis_1;
-// mod reduce_sum_fp8x23_1D;
-// mod reduce_sum_fp8x23_2D_default;
-// mod reduce_sum_fp8x23_2D_keepdims;
-// mod reduce_sum_fp8x23_2D_axis_1;
-// mod reduce_sum_i32_1D;
-// mod reduce_sum_i32_2D_default;
-// mod reduce_sum_i32_2D_keepdims;
-// mod reduce_sum_i32_2D_axis_1;
-// mod reduce_sum_i8_1D;
-// mod reduce_sum_i8_2D_default;
-// mod reduce_sum_i8_2D_keepdims;
-// mod reduce_sum_i8_2D_axis_1;
-// mod reduce_sum_u32_1D;
-// mod reduce_sum_u32_2D_default;
-// mod reduce_sum_u32_2D_keepdims;
-// mod reduce_sum_u32_2D_axis_1;
-// mod relu_fp16x16;
-// mod relu_fp8x23;
-// mod relu_i32;
-// mod relu_i8;
-// mod sigmoid_fp16x16;
-// mod sigmoid_fp8x23;
-// mod sin_fp16x16;
-// mod sin_fp8x23;
-// mod sinh_fp16x16;
-// mod sinh_fp8x23;
-// mod softmax_fp16x16;
-// mod softmax_fp8x23;
-// mod softplus_fp8x23;
-// mod softplus_fp16x16;
-// mod softsign_fp8x23;
-// mod softsign_fp16x16;
-// mod sqrt_fp16x16;
-// mod sqrt_fp8x23;
-// mod sub_fp16x16;
-// mod sub_fp16x16_broadcast;
-// mod sub_fp8x23;
-// mod sub_fp8x23_broadcast;
-// mod sub_i32;
-// mod sub_i32_broadcast;
-// mod sub_i8;
-// mod sub_i8_broadcast;
-// mod sub_u32;
-// mod sub_u32_broadcast;
-// mod tanh_fp16x16;
-// mod tanh_fp8x23;
-// mod transpose_fp16x16_2d;
-// mod transpose_fp16x16_3d;
-// mod transpose_fp8x23_2d;
-// mod transpose_fp8x23_3d;
-// mod transpose_i32_2d;
-// mod transpose_i32_3d;
-// mod transpose_i8_2d;
-// mod transpose_i8_3d;
-// mod transpose_u32_2d;
-// mod transpose_u32_3d;
-// mod xor_fp16x16;
-// mod xor_fp16x16_broadcast;
-// mod xor_fp8x23;
-// mod xor_fp8x23_broadcast;
-// mod xor_i32;
-// mod xor_i32_broadcast;
-// mod xor_i8;
-// mod xor_i8_broadcast;
-// mod xor_u32;
-// mod xor_u32_broadcast;
-// mod less_fp16x16;
-// mod less_fp16x16_broadcast;
-// mod less_fp8x23;
-// mod less_fp8x23_broadcast;
-// mod less_i32;
-// mod less_i32_broadcast;
-// mod less_i8;
-// mod less_i8_broadcast;
-// mod less_u32;
-// mod less_u32_broadcast;
-// mod greater_equal_fp16x16;
-// mod greater_equal_fp16x16_broadcast;
-// mod greater_equal_fp8x23;
-// mod greater_equal_fp8x23_broadcast;
-// mod greater_equal_i32;
-// mod greater_equal_i32_broadcast;
-// mod greater_equal_i8;
-// mod greater_equal_i8_broadcast;
-// mod greater_equal_u32;
-// mod greater_equal_u32_broadcast;
-// mod slice_fp16x16_2d;
-// mod slice_fp16x16_3d;
-// mod slice_fp8x23_2d;
-// mod slice_fp8x23_3d;
-// mod slice_i32_2d;
-// mod slice_i32_3d;
-// mod slice_i8_2d;
-// mod slice_i8_3d;
-// mod slice_u32_2d;
-// mod slice_u32_3d;
-// mod gather_fp8x23_3d_default;
-// mod gather_fp8x23_3d_axis1;
-// mod gather_fp8x23_3d_axis2;
-// mod gather_fp16x16_3d_default;
-// mod gather_fp16x16_3d_axis1;
-// mod gather_fp16x16_3d_axis2;
-// mod gather_i8_3d_default;
-// mod gather_i8_3d_axis1;
-// mod gather_i8_3d_axis2;
-// mod gather_i32_3d_default;
-// mod gather_i32_3d_axis1;
-// mod gather_i32_3d_axis2;
-// mod gather_u32_3d_default;
-// mod gather_u32_3d_axis1;
-// mod gather_u32_3d_axis2;
-// mod nonzero_fp16x16_2d;
-// mod nonzero_fp16x16_3d;
-// mod nonzero_fp8x23_2d;
-// mod nonzero_fp8x23_3d;
-// mod nonzero_i32_2d;
-// mod nonzero_i32_3d;
-// mod nonzero_i8_2d;
-// mod nonzero_i8_3d;
-// mod nonzero_u32_2d;
-// mod nonzero_u32_3d;
-// mod squeeze_fP16x16;
-// mod squeeze_fP8x23;
-// mod squeeze_i32;
-// mod squeeze_i8;
-// mod squeeze_u32;
-// mod unsqueeze_fp16x16_2d;
-// mod unsqueeze_fp16x16_3d;
-// mod unsqueeze_fp8x23_2d;
-// mod unsqueeze_fp8x23_3d;
-// mod unsqueeze_i32_2d;
-// mod unsqueeze_i32_3d;
-// mod unsqueeze_i8_2d;
-// mod unsqueeze_i8_3d;
-// mod unsqueeze_u32_2d;
-// mod unsqueeze_u32_3d;
-// mod sign_fP16x16;
-// mod sign_fP8x23;
-// mod sign_fail;
-// mod sign_i32;
-// mod sign_i8;
-// mod clip_fp16x16_2d;
-// mod clip_fp16x16_3d;
-// mod clip_fp8x23_2d;
-// mod clip_fp8x23_3d;
-// mod clip_i32_2d;
-// mod clip_i32_3d;
-// mod clip_i8_2d;
-// mod clip_i8_3d;
-// mod clip_u32_2d;
-// mod clip_u32_3d;
-// mod identity_fP16x16;
-// mod identity_fP8x23;
-// mod identity_i32;
-// mod identity_i8;
-// mod identity_u32;
-// mod thresholded_relu_fp16x16;
-// mod thresholded_relu_fp8x23;
-// mod hard_sigmoid_fp8x23;
-// mod hard_sigmoid_fp16x16;
-// mod neg_fp16x16;
-// mod neg_fp8x23;
-// mod neg_i32;
-// mod neg_i8;
-// mod gemm_all_attributes;
-// mod gemm_alpha;
-// mod gemm_beta;
-// mod gemm_default_matrix_bias;
-// mod gemm_default_vector_bias;
-// mod gemm_default_no_bias;
-// mod gemm_transposeA;
-// mod gemm_transposeB;
-// mod min_fp16x16_three_tensors;
-// mod min_fp16x16_broadcast_three_tensors;
-// mod min_fp16x16_two_tensors;
-// mod min_fp16x16_broadcast_two_tensors;
-// mod min_fp8x23_three_tensors;
-// mod min_fp8x23_broadcast_three_tensors;
-// mod min_fp8x23_two_tensors;
-// mod min_fp8x23_broadcast_two_tensors;
-// mod min_i32_three_tensors;
-// mod min_i32_broadcast_three_tensors;
-// mod min_i32_two_tensors;
-// mod min_i32_broadcast_two_tensors;
-// mod min_i8_three_tensors;
-// mod min_i8_broadcast_three_tensors;
-// mod min_i8_two_tensors;
-// mod min_i8_broadcast_two_tensors;
-// mod min_u32_three_tensors;
-// mod min_u32_broadcast_three_tensors;
-// mod min_u32_two_tensors;
-// mod min_u32_broadcast_two_tensors;
-// mod where_fp16x16;
-// mod where_fp16x16_broadcast;
-// mod where_fp8x23;
-// mod where_fp8x23_broadcast;
-// mod where_i32;
-// mod where_i32_broadcast;
-// mod where_i8;
-// mod where_i8_broadcast;
-// mod where_u32;
-// mod where_u32_broadcast;
-// mod not_bool;
-// mod round_fp16x16;
-// mod round_fp8x23;
-// mod max_fp16x16_three_tensors;
-// mod max_fp16x16_broadcast_three_tensors;
-// mod max_fp16x16_two_tensors;
-// mod max_fp16x16_broadcast_two_tensors;
-// mod max_fp8x23_three_tensors;
-// mod max_fp8x23_broadcast_three_tensors;
-// mod max_fp8x23_two_tensors;
-// mod max_fp8x23_broadcast_two_tensors;
-// mod max_i32_three_tensors;
-// mod max_i32_broadcast_three_tensors;
-// mod max_i32_two_tensors;
-// mod max_i32_broadcast_two_tensors;
-// mod max_i8_three_tensors;
-// mod max_i8_broadcast_three_tensors;
-// mod max_i8_two_tensors;
-// mod max_i8_broadcast_two_tensors;
-// mod max_u32_three_tensors;
-// mod max_u32_broadcast_three_tensors;
-// mod max_u32_two_tensors;
-// mod max_u32_broadcast_two_tensors;
-// mod scatter_fp16x16_3d_default;
-// mod scatter_fp16x16_3d_axis1;
-// mod scatter_fp16x16_3d_axis1_add;
-// mod scatter_fp8x23_default;
-// mod scatter_fp8x23_axis1;
-// mod scatter_fp8x23_mul;
-// mod scatter_i8_default;
-// mod scatter_i8_axis1;
-// mod scatter_i8_axis1_max;
-// mod scatter_u32_default;
-// mod scatter_u32_axis1;
-// mod scatter_u32_add;
-// mod array_feature_extractor_1D_i32;
-// mod array_feature_extractor_1D_fp8x23;
-// mod array_feature_extractor_1D_fp16x16;
-// mod array_feature_extractor_2D_i32;
-// mod array_feature_extractor_2D_fp8x23;
-// mod array_feature_extractor_2D_fp16x16;
-// mod array_feature_extractor_3D_i32;
-// mod array_feature_extractor_3D_fp8x23;
-// mod array_feature_extractor_3D_fp16x16;
-// mod binarizer_fp16x16;
-// mod binarizer_fp8x23;
-// mod tril_fp16x16;
-// mod tril_fp16x16_neg;
-// mod tril_fp16x16_one_row;
-// mod tril_fp16x16_out_neg;
-// mod tril_fp16x16_out_pos;
-// mod tril_fp16x16_pos;
-// mod tril_fp16x16_square;
-// mod tril_fp16x16_square_neg;
-// mod tril_fp16x16_zero;
-// mod triu_fp16x16;
-// mod triu_fp16x16_neg;
-// mod triu_fp16x16_one_row;
-// mod triu_fp16x16_out_neg;
-// mod triu_fp16x16_out_pos;
-// mod triu_fp16x16_pos;
-// mod triu_fp16x16_square;
-// mod triu_fp16x16_square_neg;
-// mod triu_fp16x16_zero;
-// mod tril_fp8x23;
-// mod tril_fp8x23_neg;
-// mod tril_fp8x23_one_row;
-// mod tril_fp8x23_out_neg;
-// mod tril_fp8x23_out_pos;
-// mod tril_fp8x23_pos;
-// mod tril_fp8x23_square;
-// mod tril_fp8x23_square_neg;
-// mod tril_fp8x23_zero;
-// mod triu_fp8x23;
-// mod triu_fp8x23_neg;
-// mod triu_fp8x23_one_row;
-// mod triu_fp8x23_out_neg;
-// mod triu_fp8x23_out_pos;
-// mod triu_fp8x23_pos;
-// mod triu_fp8x23_square;
-// mod triu_fp8x23_square_neg;
-// mod triu_fp8x23_zero;
-// mod tril_i32;
-// mod tril_neg_i32;
-// mod tril_i32_one_row;
-// mod tril_i32_out_neg;
-// mod tril_i32_out_pos;
-// mod tril_i32_pos;
-// mod tril_i32_square;
-// mod tril_i32_square_neg;
-// mod tril_i32_zero;
-// mod triu_i32;
-// mod triu_i32_neg;
-// mod triu_i32_one_row;
-// mod triu_i32_out_neg;
-// mod triu_i32_out_pos;
-// mod triu_i32_pos;
-// mod triu_i32_square;
-// mod triu_i32_square_neg;
-// mod triu_i32_zero;
-// mod tril_i8;
-// mod tril_i8_neg;
-// mod tril_i8_one_row;
-// mod tril_i8_out_neg;
-// mod tril_i8_out_pos;
-// mod tril_i8_pos;
-// mod tril_i8_square;
-// mod tril_i8_square_neg;
-// mod tril_i8_zero;
-// mod triu_i8;
-// mod triu_i8_neg;
-// mod triu_i8_one_row;
-// mod triu_i8_out_neg;
-// mod triu_i8_out_pos;
-// mod triu_i8_pos;
-// mod triu_i8_square;
-// mod triu_i8_square_neg;
-// mod triu_i8_zero;
-// mod tril_u32;
-// mod tril_u32_neg;
-// mod tril_u32_one_row;
-// mod tril_u32_out_neg;
-// mod tril_u32_out_pos;
-// mod tril_u32_pos;
-// mod tril_u32_square;
-// mod tril_u32_square_neg;
-// mod tril_u32_zero;
-// mod triu_u32;
-// mod triu_u32_neg;
-// mod triu_u32_one_row;
-// mod triu_u32_out_neg;
-// mod triu_u32_out_pos;
-// mod triu_u32_pos;
-// mod triu_u32_square;
-// mod triu_u32_square_neg;
-// mod triu_u32_zero;
-// mod reduce_sum_square_fp16x16_export_do_not_keepdims;
-// mod reduce_sum_square_fp16x16_export_keepdims;
-// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims;
-// mod reduce_sum_square_fp8x23_export_do_not_keepdims;
-// mod reduce_sum_square_fp8x23_export_keepdims;
-// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims;
-// mod reduce_sum_square_i32_export_do_not_keepdims;
-// mod reduce_sum_square_i32_export_keepdims;
-// mod reduce_sum_square_i32_export_negative_axes_keepdims;
-// mod reduce_sum_square_i8_export_do_not_keepdims;
-// mod reduce_sum_square_i8_export_keepdims;
-// mod reduce_sum_square_i8_export_negative_axes_keepdims;
-// mod reduce_sum_square_u32_export_do_not_keepdims;
-// mod reduce_sum_square_u32_export_keepdims;
-// mod reduce_sum_square_u32_export_negative_axes_keepdims;
-// mod reduce_l2_fp16x16_export_do_not_keepdims;
-// mod reduce_l2_fp16x16_export_keepdims;
-// mod reduce_l2_fp16x16_export_negative_axes_keepdims;
-// mod reduce_l2_fp8x23_export_do_not_keepdims;
-// mod reduce_l2_fp8x23_export_keepdims;
-// mod reduce_l2_fp8x23_export_negative_axes_keepdims;
-// mod reduce_l1_fp16x16_export_do_not_keepdims;
-// mod reduce_l1_fp16x16_export_keepdims;
-// mod reduce_l1_fp16x16_export_negative_axes_keepdims;
-// mod reduce_l1_fp8x23_export_do_not_keepdims;
-// mod reduce_l1_fp8x23_export_keepdims;
-// mod reduce_l1_fp8x23_export_negative_axes_keepdims;
-// mod reduce_l1_i32_export_do_not_keepdims;
-// mod reduce_l1_i32_export_keepdims;
-// mod reduce_l1_i32_export_negative_axes_keepdims;
-// mod reduce_l1_i8_export_do_not_keepdims;
-// mod reduce_l1_i8_export_keepdims;
-// mod reduce_l1_i8_export_negative_axes_keepdims;
-// mod reduce_l1_u32_export_do_not_keepdims;
-// mod reduce_l1_u32_export_keepdims;
-// mod reduce_l1_u32_export_negative_axes_keepdims;
-// mod reduce_prod_fp16x16_1D;
-// mod reduce_prod_fp16x16_2D_default;
-// mod reduce_prod_fp16x16_2D_keepdims;
-// mod reduce_prod_fp16x16_2D_axis_1;
-// mod reduce_prod_fp8x23_1D;
-// mod reduce_prod_fp8x23_2D_default;
-// mod reduce_prod_fp8x23_2D_keepdims;
-// mod reduce_prod_fp8x23_2D_axis_1;
-// mod reduce_prod_i32_1D;
-// mod reduce_prod_i32_2D_default;
-// mod reduce_prod_i32_2D_keepdims;
-// mod reduce_prod_i32_2D_axis_1;
-// mod reduce_prod_i8_1D;
-// mod reduce_prod_i8_2D_default;
-// mod reduce_prod_i8_2D_keepdims;
-// mod reduce_prod_i8_2D_axis_1;
-// mod reduce_prod_u32_1D;
-// mod reduce_prod_u32_2D_default;
-// mod reduce_prod_u32_2D_keepdims;
-// mod reduce_prod_u32_2D_axis_1;
-// mod gather_elements_fp16x16_3d_default;
-// mod gather_elements_fp16x16_3d_axis1;
-// mod gather_elements_fp16x16_3d_axis2;
-// mod gather_elements_fp8x23_3d_default;
-// mod gather_elements_fp8x23_3d_axis1;
-// mod gather_elements_fp8x23_3d_axis2;
-// mod gather_elements_i8_3d_default;
-// mod gather_elements_i8_3d_axis1;
-// mod gather_elements_i32_3d_default;
-// mod gather_elements_i32_3d_axis1;
-// mod gather_elements_i32_3d_axis2;
-// mod gather_elements_u32_default;
-// mod gather_elements_u32_axis1;
-// mod gather_elements_u32_axis2;
-// mod gather_elements_u32_axis3;
-// mod sequence_length_fp16x16;
-// mod sequence_length_fp16x16_broadcast;
-// mod sequence_length_fp8x23;
-// mod sequence_length_fp8x23_broadcast;
-// mod sequence_length_i32;
-// mod sequence_length_i32_broadcast;
-// mod sequence_length_i8;
-// mod sequence_length_i8_broadcast;
-// mod sequence_length_u32;
-// mod sequence_length_u32_broadcast;
-// mod sequence_at_u32_positive;
-// mod sequence_at_u32_negative;
-// mod sequence_at_fp16x16_positive;
-// mod sequence_at_fp16x16_negative;
-// mod sequence_at_fp8x23_positive;
-// mod sequence_at_fp8x23_negative;
-// mod sequence_at_i32_positive;
-// mod sequence_at_i32_negative;
-// mod sequence_at_i8_positive;
-// mod sequence_at_i8_negative;
-// mod reduce_min_fp16x16_1D;
-// mod reduce_min_fp16x16_2D_default;
-// mod reduce_min_fp16x16_2D_keepdims;
-// mod reduce_min_fp16x16_2D_axis_1;
-// mod reduce_min_fp8x23_1D;
-// mod reduce_min_fp8x23_2D_default;
-// mod reduce_min_fp8x23_2D_keepdims;
-// mod reduce_min_fp8x23_2D_axis_1;
-// mod reduce_min_i32_1D;
-// mod reduce_min_i32_2D_default;
-// mod reduce_min_i32_2D_keepdims;
-// mod reduce_min_i32_2D_axis_1;
-// mod reduce_min_i8_1D;
-// mod reduce_min_i8_2D_default;
-// mod reduce_min_i8_2D_keepdims;
-// mod reduce_min_i8_2D_axis_1;
-// mod reduce_min_u32_1D;
-// mod reduce_min_u32_2D_default;
-// mod reduce_min_u32_2D_keepdims;
-// mod reduce_min_u32_2D_axis_1;
-// mod sequence_construct_fp16x16;
-// mod sequence_construct_fp8x23;
-// mod sequence_construct_i32;
-// mod sequence_construct_i8;
-// mod sequence_construct_u32;
-// mod shrink_hard_fp16x16;
-// mod shrink_soft_fp16x16;
-// mod shrink_hard_fp8x23;
-// mod shrink_soft_fp8x23;
-// mod sequence_empty_fp16x16;
-// mod sequence_empty_fp8x23;
-// mod sequence_empty_i32;
-// mod sequence_empty_i8;
-// mod sequence_empty_u32;
-// mod reduce_mean_fp16x16_1D;
-// mod reduce_mean_fp16x16_2D_default;
-// mod reduce_mean_fp16x16_2D_keepdims;
-// mod reduce_mean_fp16x16_2D_axis_1;
-// mod reduce_mean_fp8x23_1D;
-// mod reduce_mean_fp8x23_2D_default;
-// mod reduce_mean_fp8x23_2D_keepdims;
-// mod reduce_mean_fp8x23_2D_axis_1;
-// mod reduce_mean_i32_1D;
-// mod reduce_mean_i32_2D_default;
-// mod reduce_mean_i32_2D_keepdims;
-// mod reduce_mean_i32_2D_axis_1;
-// mod reduce_mean_i8_1D;
-// mod reduce_mean_i8_2D_default;
-// mod reduce_mean_i8_2D_keepdims;
-// mod reduce_mean_i8_2D_axis_1;
-// mod reduce_mean_u32_1D;
-// mod reduce_mean_u32_2D_default;
-// mod reduce_mean_u32_2D_keepdims;
-// mod reduce_mean_u32_2D_axis_1;
-// mod pow_fp16x16;
-// mod pow_fp16x16_broadcast;
-// mod pow_fp8x23;
-// mod pow_fp8x23_broadcast;
-// mod sequence_erase_u32_positive;
-// mod sequence_erase_u32_negative;
-// mod sequence_erase_u32_empty;
-// mod sequence_erase_fp16x16_positive;
-// mod sequence_erase_fp16x16_negative;
-// mod sequence_erase_fp16x16_empty;
-// mod sequence_erase_fp8x23_positive;
-// mod sequence_erase_fp8x23_negative;
-// mod sequence_erase_fp8x23_empty;
-// mod sequence_erase_i32_positive;
-// mod sequence_erase_i32_negative;
-// mod sequence_erase_i32_empty;
-// mod sequence_erase_i8_positive;
-// mod sequence_erase_i8_negative;
-// mod sequence_erase_i8_empty;
-// mod sequence_insert_fp16x16;
-// mod sequence_insert_fp8x23;
-// mod sequence_insert_i32;
-// mod sequence_insert_i8;
-// mod sequence_insert_u32;
-// mod concat_from_sequence_fp8x23_new_axis_zero;
-// mod concat_from_sequence_fp8x23_new_axis_one;
-// mod concat_from_sequence_fp8x23_new_axis_default;
-// mod concat_from_sequence_fp16x16_new_axis_zero;
-// mod concat_from_sequence_fp16x16_new_axis_one;
-// mod concat_from_sequence_fp16x16_new_axis_default;
-// mod concat_from_sequence_i32_new_axis_zero;
-// mod concat_from_sequence_i32_new_axis_one;
-// mod concat_from_sequence_i32_new_axis_default;
-// mod concat_from_sequence_i8_new_axis_zero;
-// mod concat_from_sequence_i8_new_axis_one;
-// mod concat_from_sequence_i8_new_axis_default;
-// mod concat_from_sequence_u32_new_axis_zero;
-// mod concat_from_sequence_u32_new_axis_one;
-// mod concat_from_sequence_u32_new_axis_default;
-// mod is_nan_fp16x16;
-// mod is_nan_fp8x23;
-// mod is_inf_fp16x16;
-// mod is_inf_fp8x23;
-// mod is_inf_i32;
-// mod is_inf_i8;
-// mod is_inf_u32;
-// mod is_pos_inf_fp16x16;
-// mod is_neg_inf_fp16x16;
-// mod is_pos_inf_fp8x23;
-// mod is_neg_inf_fp8x23;
-// mod is_pos_inf_i32;
-// mod is_neg_inf_i32;
-// mod is_pos_inf_i8;
-// mod is_neg_inf_i8;
-// mod reduce_log_sum_fp8x23_export_do_not_keepdims;
-// mod reduce_log_sum_fp8x23_export_keepdims;
-// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims;
-// mod reduce_log_sum_fp16x16_export_do_not_keepdims;
-// mod reduce_log_sum_fp16x16_export_keepdims;
-// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims;
-// mod and_bool;
-// mod erf_fp16x16;
-// mod erf_fp8x23;
-// mod unique_fp16x16_without_axis_sorted;
-// mod unique_fp16x16_with_axis_zero_sorted;
-// mod unique_u32_without_axis_sorted;
-// mod unique_u32_without_axis_not_sorted;
-// mod unique_u32_with_axis_zero_sorted;
-// mod unique_u32_with_axis_zero_not_sorted;
-// mod unique_u32_with_axis_one_sorted;
-// mod unique_u32_with_axis_one_not_sorted;
-// mod gather_nd_fp16x16_3d_default;
-// mod gather_nd_fp16x16_3d_batch_dims1;
-// mod gather_nd_fp16x16_3d_batch_dims2;
-// mod gather_nd_fp8x23_3d_default;
-// mod gather_nd_fp8x23_3d_batch_dims1;
-// mod gather_nd_fp8x23_3d_batch_dims2;
-// mod gather_nd_i32_3d_default;
-// mod gather_nd_i32_3d_batch_dims1;
-// mod gather_nd_i32_3d_batch_dims2;
-// mod gather_nd_i8_3d_default;
-// mod gather_nd_i8_3d_batch_dims1;
-// mod gather_nd_u32_default;
-// mod gather_nd_u32_batch_dims1;
-// mod gather_nd_u32_batch_dims2;
-// mod resize_upsample_scales_nearest;
-// mod resize_downsample_scales_cubic;
-// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside;
-// mod resize_downsample_scales_cubic_align_corners;
-// mod resize_upsample_scales_linear;
-// mod resize_downsample_scales_linear_align_corners;
-// mod resize_downsample_scales_nearest;
-// mod resize_upsample_scales_cubic;
-// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside;
-// mod resize_upsample_scales_cubic_align_corners;
-// mod resize_upsample_scales_cubic_asymmetric;
-// mod resize_upsample_scales_linear_align_corners;
-// mod resize_upsample_sizes_nearest;
-// mod resize_upsample_sizes_cubic;
-// mod resize_downsample_sizes_cubic;
-// mod resize_downsample_sizes_nearest;
-// mod resize_upsample_scales_linear_half_pixel_symmetric;
-// mod resize_downsample_scales_cubic_antialias;
-// mod resize_downsample_scales_linear_antialias;
-// mod resize_downsample_sizes_cubic_antialias;
-// mod resize_downsample_sizes_linear_pytorch_half_pixel;
-// mod resize_tf_crop_and_resize;
-// mod resize_tf_crop_and_resize_extrapolation_value;
-// mod resize_upsample_scales_nearest_axes_2_3;
-// mod resize_upsample_scales_nearest_axes_3_2;
-// mod resize_upsample_sizes_nearest_axes_2_3;
-// mod resize_upsample_sizes_nearest_ceil_half_pixel;
-// mod resize_upsample_sizes_nearest_floor_align_corners;
-// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric;
-// mod resize_downsample_scales_linear_half_pixel_symmetric;
-// mod resize_downsample_sizes_nearest_not_larger;
-// mod resize_downsample_sizes_nearest_not_smaller;
-// mod resize_tf_crop_and_resize_axes_2_3;
-// mod resize_tf_crop_and_resize_axes_3_2;
-// mod resize_upsample_sizes_nearest_axes_3_2;
-// mod resize_upsample_sizes_nearest_not_larger;
-// mod resize_upsample_sizes_nearest_not_smaller;
-// mod compress_fp16x16_3d_default;
-// mod compress_fp16x16_3d_axis1;
-// mod compress_fp16x16_3d_axis2;
-// mod compress_fp16x16_3d_axis3;
-// mod compress_fp16x16_3d_noaxis;
-// mod compress_fp8x23_3d_default;
-// mod compress_fp8x23_3d_axis1;
-// mod compress_fp8x23_3d_axis2;
-// mod compress_i32_3d_default;
-// mod compress_i32_3d_axis1;
-// mod compress_i32_3d_axis2;
-// mod compress_i8_3d_default;
-// mod compress_i8_3d_axis1;
-// mod compress_i8_3d_axis2;
-// mod compress_u32_3d_default;
-// mod compress_u32_3d_axis1;
-// mod compress_u32_3d_axis2;
-// mod compress_u32_3d_axis2_2;
-// mod compress_u32_3d_axis3;
-// mod layer_normalization_default_axis;
-// mod layer_normalization_4d_axis0;
-// mod layer_normalization_4d_axis1;
-// mod layer_normalization_4d_axis2;
-// mod layer_normalization_4d_axis3;
-// mod layer_normalization_3d_axis0_epsilon;
-// mod layer_normalization_3d_axis_negative_3_epsilon;
-// mod layer_normalization_3d_axis1_epsilon;
-// mod layer_normalization_3d_axis2_epsilon;
-// mod layer_normalization_4d_axis_negative_4;
-// mod layer_normalization_4d_axis_negative_3;
-// mod layer_normalization_4d_axis_negative_2;
-// mod layer_normalization_4d_axis_negative_1;
-// mod layer_normalization_3d_axis_negative_2_epsilon;
-// mod layer_normalization_3d_axis_negative_1_epsilon;
-// mod layer_normalization_test;
-// mod split_u32_1d_equal_parts;
-// mod split_u32_2d_equal_parts;
-// mod split_u32_zero_size;
-// mod split_u32_1d_variable_parts;
-// mod split_u32_2d_variable_parts;
-// mod split_u32_1d_uneven;
-// mod split_u32_2d_uneven;
-// mod split_fp16x16_1d_equal_parts;
-// mod split_fp16x16_1d_variable_parts;
-// mod split_fp16x16_2d_equal_parts;
-// mod split_fp16x16_2d_variable_parts;
-// mod split_fp16x16_zero_size;
-// mod split_fp16x16_1d_uneven;
-// mod split_fp16x16_2d_uneven;
-// mod grid_sample;
-// mod grid_sample_cubic;
-// mod grid_sample_aligncorners;
-// mod grid_sample_nearest;
-// mod grid_sample_nearest_aligncorner;
-// mod grid_sample_padding_border;
-// mod grid_sample_padding_reflection;
-// mod grid_sample_padding_zeros;
-// mod col2im;
-// mod col2im_5D;
-// mod col2im_dilations;
-// mod col2im_pads;
-// mod col2im_strides;
-// mod random_uniform_like_fp16x16;
-// mod random_uniform_like_fp8x23;
-// mod range_fp8x23;
-// mod range_fp16x16;
-// mod range_i32;
-// mod range_i8;
-// mod range_u32;
-// mod hann_window_fp8x23;
-// mod hann_window_fp16x16;
-// mod hamming_window_fp16x16;
-// mod hamming_window_fp8x23;
-// mod blackman_window_fp16x16;
-// mod blackman_window_fp8x23;
-// mod split_to_sequence_fp16x16_1d_equal_parts;
-// mod split_to_sequence_fp16x16_1d_variable_parts;
-// mod split_to_sequence_fp16x16_2d_equal_parts;
-// mod split_to_sequence_fp16x16_2d_variable_parts;
-// mod split_to_sequence_fp16x16_zero_size;
-// mod split_to_sequence_fp16x16_1d_uneven;
-// mod split_to_sequence_fp16x16_2d_uneven;
-// mod split_to_sequence_u32_1d_equal_parts;
-// mod split_to_sequence_u32_1d_variable_parts;
-// mod split_to_sequence_u32_2d_equal_parts;
-// mod split_to_sequence_u32_2d_variable_parts;
-// mod split_to_sequence_u32_zero_size;
-// mod split_to_sequence_u32_1d_uneven;
-// mod split_to_sequence_u32_2d_uneven;
-// mod split_to_sequence_2d_scalar;
-// mod split_to_sequence_2d_nokeepdims;
-// mod split_to_sequence_1d_nokeepdims;
-// mod reverse_sequence_fp16x16_batch_equal_parts;
-// mod reverse_sequence_fp16x16_time_equal_parts;
-// mod reverse_sequence_i32_batch_equal_parts;
-// mod reverse_sequence_i32_time_equal_parts;
-// mod reverse_sequence_i8_batch_equal_parts;
-// mod reverse_sequence_i8_time_equal_parts;
-// mod reverse_sequence_u32_4x4_batch;
-// mod reverse_sequence_u32_4x4_time;
-// mod reverse_sequence_u32_3x3_batch;
-// mod reverse_sequence_u32_3x3_time;
-// mod reverse_sequence_different_dimensions_4_5;
-// mod reverse_sequence_different_dimensions_2_4;
-// mod reverse_sequence_different_dimensions_1_6;
-// mod reverse_sequence_different_dimensions_3x9_batch;
-// mod reverse_sequence_different_dimensions_3x9_time;
-// mod conv_transpose;
-// mod conv_transpose_1d;
-// mod conv_transpose_3d;
-// mod conv_transpose_attributes;
-// mod conv_transpose_autopad_same;
-// mod conv_transpose_dilations;
-// mod conv_transpose_pads;
-// mod conv_transpose_group_2;
-// mod conv_transpose_group_2_image_3;
-// mod depth_to_space_fp16x16;
-// mod depth_to_space_fp8x23;
-// mod depth_to_space_i32;
-// mod depth_to_space_i8;
-// mod depth_to_space_u32;
-// mod space_to_depth_fp16x16;
-// mod space_to_depth_fp8x23;
-// mod space_to_depth_i32;
-// mod space_to_depth_i8;
-// mod space_to_depth_u32;
-// mod scatter_nd_fp16x16_3d_default;
-// mod scatter_nd_fp16x16_3d_add;
-// mod scatter_nd_fp16x16_3d_mul;
-// mod scatter_nd_fp16x16_3d_max;
-// mod scatter_nd_fp16x16_3d_min;
-// mod scatter_nd_fp8x23_3d_default;
-// mod scatter_nd_fp8x23_3d_add;
-// mod scatter_nd_fp8x23_3d_mul;
-// mod scatter_nd_fp8x23_3d_max;
-// mod scatter_nd_fp8x23_3d_min;
-// mod scatter_nd_u32_default;
-// mod scatter_nd_u32_add;
-// mod scatter_nd_u32_mul;
-// mod scatter_nd_u32_max;
-// mod scatter_nd_u32_min;
-// mod conv_2D_with_padding;
-// mod conv_1D_no_padding;
-// mod conv_1D_with_padding;
-// mod conv_3D_no_padding;
-// mod conv_3D_with_padding;
-// mod conv_4D_no_padding;
-// mod conv_2D_with_2_groups;
-// mod conv_2D_with_autopad_same;
-// mod conv_2D_with_strides_asymmetric_padding;
-// mod conv_2D_with_strides_with_padding;
-// mod conv_4D_with_padding;
+mod abs_fp16x16;
+mod abs_fp8x23;
+mod abs_i32;
+mod abs_i8;
+mod acos_fp16x16;
+mod acos_fp8x23;
+mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; +mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; 
+mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod 
mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod reduce_sum_fp16x16_1D; +mod reduce_sum_fp16x16_2D_default; +mod reduce_sum_fp16x16_2D_keepdims; +mod reduce_sum_fp16x16_2D_axis_1; +mod reduce_sum_fp8x23_1D; +mod reduce_sum_fp8x23_2D_default; +mod reduce_sum_fp8x23_2D_keepdims; +mod reduce_sum_fp8x23_2D_axis_1; +mod reduce_sum_i32_1D; +mod reduce_sum_i32_2D_default; +mod reduce_sum_i32_2D_keepdims; +mod reduce_sum_i32_2D_axis_1; +mod reduce_sum_i8_1D; +mod reduce_sum_i8_2D_default; +mod reduce_sum_i8_2D_keepdims; +mod reduce_sum_i8_2D_axis_1; +mod reduce_sum_u32_1D; +mod reduce_sum_u32_2D_default; +mod reduce_sum_u32_2D_keepdims; +mod reduce_sum_u32_2D_axis_1; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; 
+mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod 
triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod 
reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod 
sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod 
resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod 
split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; diff --git a/tests/nodes/squeeze_fP16x16.cairo b/tests/nodes/squeeze_fP16x16.cairo index b595e5100..83748ca1f 100644 --- a/tests/nodes/squeeze_fP16x16.cairo +++ b/tests/nodes/squeeze_fP16x16.cairo @@ -14,7 +14,7 @@ fn test_squeeze_fP16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span())); + let y = input_0.squeeze(Option::Some(array![0, 2].span())); assert(y.shape == z.shape, 'shapes do not match'); } diff --git a/tests/nodes/squeeze_fP8x23.cairo b/tests/nodes/squeeze_fP8x23.cairo index 0ee6f8a15..57d418686 100644 --- a/tests/nodes/squeeze_fP8x23.cairo +++ b/tests/nodes/squeeze_fP8x23.cairo @@ -14,7 +14,7 @@ fn test_squeeze_fP8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span())); + let y = input_0.squeeze(Option::Some(array![0, 2].span())); assert(y.shape == z.shape, 'shapes do not match'); } diff --git a/tests/nodes/squeeze_i32.cairo b/tests/nodes/squeeze_i32.cairo index ce880a31c..b4b09d25d 100644 --- 
a/tests/nodes/squeeze_i32.cairo +++ b/tests/nodes/squeeze_i32.cairo @@ -14,7 +14,7 @@ fn test_squeeze_i32() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span())); + let y = input_0.squeeze(Option::Some(array![0, 2].span())); assert(y.shape == z.shape, 'shapes do not match'); } diff --git a/tests/nodes/squeeze_i8.cairo b/tests/nodes/squeeze_i8.cairo index 1a5f9147f..0634f1213 100644 --- a/tests/nodes/squeeze_i8.cairo +++ b/tests/nodes/squeeze_i8.cairo @@ -14,7 +14,7 @@ fn test_squeeze_i8() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span())); + let y = input_0.squeeze(Option::Some(array![0, 2].span())); assert(y.shape == z.shape, 'shapes do not match'); } diff --git a/tests/nodes/squeeze_u32.cairo b/tests/nodes/squeeze_u32.cairo index 15939db4a..bf90027b6 100644 --- a/tests/nodes/squeeze_u32.cairo +++ b/tests/nodes/squeeze_u32.cairo @@ -14,7 +14,7 @@ fn test_squeeze_u32() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.squeeze(Option::Some(array![0_i32, 2_i32].span())); + let y = input_0.squeeze(Option::Some(array![0, 2].span())); assert(y.shape == z.shape, 'shapes do not match'); } From 14d249a2671a97ae1bee35c6e76c9231b3e54003 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 09:07:53 +0100 Subject: [PATCH 29/40] make it compile --- src/operators/tensor/math.cairo | 1 - src/operators/tensor/math/reduce_log_sum_exp.cairo | 1 - 2 files changed, 2 deletions(-) diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index f4ac4ec24..b73f6d102 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -59,7 +59,6 @@ mod gather_nd; mod reduce_log_sum; mod erf; mod reduce_log_sum_exp; -mod compress; mod layer_normalization; mod resize; mod compress; diff --git a/src/operators/tensor/math/reduce_log_sum_exp.cairo b/src/operators/tensor/math/reduce_log_sum_exp.cairo index 0272f019c..5fd57ab76 100644 --- a/src/operators/tensor/math/reduce_log_sum_exp.cairo +++ b/src/operators/tensor/math/reduce_log_sum_exp.cairo @@ -5,7 +5,6 @@ use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::numbers::signed_integer::integer_trait::IntegerTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast}; From 853e7d1d275da11a090f88b7095727d2ea7ad346 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 09:08:46 +0100 Subject: [PATCH 30/40] update doc --- .../linear-classifier/linear_classifier.predict.md | 4 ++-- .../linear-regressor/linear_regressor.predict.md | 8 ++++---- .../tree_ensemble_classifier.predict.md | 4 ++-- .../tree_ensemble_regressor.predict.md | 4 ++-- docs/framework/operators/neural-network/nn.col2im.md | 1 - 5 files changed, 10 insertions(+), 11 deletions(-) diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md index aec154f68..7ed30f236 100644 --- a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md +++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md @@ -1,7 +1,7 @@ # LinearClassifierTrait::predict ```rust - fn predict(ref self: 
LinearClassifier, X: Tensor) -> Tensor; + fn predict(classifier: LinearClassifier, X: Tensor) -> Tensor; ``` Linear Classifier. Performs the linear classification. @@ -85,7 +85,7 @@ fn linear_classifier_helper( fn linear_classifier_multi_softmax() -> (Span, Tensor) { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); (labels, scores) } diff --git a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md index f1bd38831..6c40ac930 100644 --- a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md +++ b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md @@ -1,14 +1,14 @@ # LinearRegressorTrait::predict ```rust - fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor; + fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor; ``` Linear Regressor. Performs the generalized linear regression evaluation. ## Args -* `self`: LinearRegressor - A LinearRegressor object. +* `regressor`: LinearRegressor - A LinearRegressor object. * `X`: Input 2D tensor. ## Returns @@ -68,7 +68,7 @@ fn example_linear_regressor() -> Tensor { post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); scores } @@ -120,7 +120,7 @@ fn example_linear_regressor_2() -> Tensor { post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); scores } diff --git a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md index 6d839e873..c38f3e46d 100644 --- a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md +++ b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md @@ -1,7 +1,7 @@ # TreeEnsembleClassifier::predict ```rust - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); ``` Tree Ensemble classifier. Returns the top class for each of N inputs. 
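These `predict` doc updates all track the same API change: the model struct is now passed to `predict` by value instead of through a `ref self` receiver, so call sites drop the `ref` keyword. A minimal before/after sketch of the call shape, in the same style as the full examples in these docs (building `classifier` and `X` is elided here and assumed to follow those examples):

```rust
// Old call shape, removed by this patch (model borrowed with `ref`):
// let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X);

// New call shape: the model is moved into `predict` by value.
let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
```

Since `TreeEnsembleClassifier` derives `Destruct` (not `Copy`), passing it by value consumes the model, so a caller that needs to predict more than once would have to rebuild the struct between calls.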
@@ -185,7 +185,7 @@ fn tree_ensemble_classifier_helper( fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span, MutMatrix::) { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X); (labels, scores) } diff --git a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md index 812115971..243bda558 100644 --- a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md +++ b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md @@ -1,7 +1,7 @@ # TreeEnsembleRegressor::predict ```rust - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); ``` Tree Ensemble regressor. Returns the regressed values for each input in N. @@ -160,7 +160,7 @@ fn tree_ensemble_regressor_helper( fn test_tree_ensemble_regressor_SUM() -> MutMatrix:: { let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM); - let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); + let mut res = TreeEnsembleRegressorTrait::predict(regressor, X); res } >>> diff --git a/docs/framework/operators/neural-network/nn.col2im.md b/docs/framework/operators/neural-network/nn.col2im.md index fd5e82ffa..6c7b1af05 100644 --- a/docs/framework/operators/neural-network/nn.col2im.md +++ b/docs/framework/operators/neural-network/nn.col2im.md @@ -1,4 +1,3 @@ - # NNTrait::col2im ```rust From 1e593fb60e388fa5a8001903882db96e71e594b4 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 09:13:35 +0100 Subject: [PATCH 31/40] fix all-contributorsrc --- .all-contributorsrc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index befc08f85..8d4b73289 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -277,8 +277,9 @@ "profile": "https://github.com/tekkac", "contributions": [ "doc" - ], + ] }, + { "login": "canacechan", "name": "canacechan", "avatar_url": "https://avatars.githubusercontent.com/u/127183619?v=4", @@ -295,4 +296,4 @@ "projectName": "orion", "projectOwner": "gizatechxyz", "commitType": "docs" -} +} \ No newline at end of file From 1fbcd144abceadeebb48f9ff9093c5b64e114b15 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 09:26:00 +0100 Subject: [PATCH 32/40] fix .all-contributorsrc --- .all-contributorsrc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index befc08f85..002ea0590 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -277,8 +277,9 @@ "profile": "https://github.com/tekkac", "contributions": [ "doc" - ], + ] }, + { "login": "canacechan", "name": "canacechan", "avatar_url": "https://avatars.githubusercontent.com/u/127183619?v=4", @@ -286,6 +287,15 @@ "contributions": [ "code" ] + }, + { + "login": "Beeyoung", + "name": "canacechan", + "avatar_url": "https://avatars.githubusercontent.com/u/127183619?v=4", + "profile": "https://github.com/canacechan", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, @@ -295,4 +305,4 @@ "projectName": "orion", "projectOwner": "gizatechxyz", 
"commitType": "docs" -} +} \ No newline at end of file From c4efe3960ce2e9ebf8e947548cda842af289e0ea Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 09:27:43 +0100 Subject: [PATCH 33/40] Update .all-contributorsrc --- .all-contributorsrc | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 002ea0590..8d4b73289 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -287,15 +287,6 @@ "contributions": [ "code" ] - }, - { - "login": "Beeyoung", - "name": "canacechan", - "avatar_url": "https://avatars.githubusercontent.com/u/127183619?v=4", - "profile": "https://github.com/canacechan", - "contributions": [ - "code" - ] } ], "contributorsPerLine": 7, From 7dd923a8a0e3c3c80127d744849837ee67d0096a Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 08:29:05 +0000 Subject: [PATCH 34/40] docs: update README.md [skip ci] --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cc2cf1ef2..63a91db69 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ # Orion: An Open-source Framework for Validity and ZK ML ✨ -[![All Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-32-orange.svg?style=flat-square)](#contributors-) Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/). @@ -108,6 +108,7 @@ Thanks goes to these wonderful people: Vid Kersic
💻 Trunks @ Carbonable
📖 canacechan
💻 + Beeyoung
💻 From e226b2acb9d5a9e96ccc028bffaed65a54cf1940 Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 08:29:06 +0000 Subject: [PATCH 35/40] docs: update .all-contributorsrc [skip ci] --- .all-contributorsrc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 8d4b73289..a8f2e5d43 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -287,6 +287,15 @@ "contributions": [ "code" ] + }, + { + "login": "FriendlyLifeguard", + "name": "Beeyoung", + "avatar_url": "https://avatars.githubusercontent.com/u/55970530?v=4", + "profile": "http://alankang.xyz", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, @@ -296,4 +305,4 @@ "projectName": "orion", "projectOwner": "gizatechxyz", "commitType": "docs" -} \ No newline at end of file +} From 0b4d9a008bdf89c20f850fba7b27bbc4c4f32cbd Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 08:31:15 +0000 Subject: [PATCH 36/40] docs: update README.md [skip ci] --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cc2cf1ef2..b5395adfc 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ # Orion: An Open-source Framework for Validity and ZK ML ✨ -[![All Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-32-orange.svg?style=flat-square)](#contributors-) Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/). @@ -108,6 +108,7 @@ Thanks goes to these wonderful people: Vid Kersic
💻 Trunks @ Carbonable
📖 canacechan
💻 + Kugo
📖 From ff4f8174a8110442dfc8ccdb9b8c3e01ddfac94d Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 08:31:16 +0000 Subject: [PATCH 37/40] docs: update .all-contributorsrc [skip ci] --- .all-contributorsrc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 8d4b73289..da5c013ef 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -287,6 +287,15 @@ "contributions": [ "code" ] + }, + { + "login": "Gakunt", + "name": "Kugo", + "avatar_url": "https://avatars.githubusercontent.com/u/153402253?v=4", + "profile": "https://github.com/Gakunt", + "contributions": [ + "doc" + ] } ], "contributorsPerLine": 7, @@ -296,4 +305,4 @@ "projectName": "orion", "projectOwner": "gizatechxyz", "commitType": "docs" -} \ No newline at end of file +} From 9402b38461904da5a19cf4d39c59f4eb41dcc1aa Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 4 Mar 2024 10:10:41 +0100 Subject: [PATCH 38/40] make it compile --- .../ml/linear/linear_classifier.cairo | 81 +++++++------ .../ml/linear/linear_regressor.cairo | 14 ++- src/operators/ml/svm/svm_classifier.cairo | 14 ++- src/operators/ml/svm/svm_regressor.cairo | 14 ++- .../tree_ensemble_classifier.cairo | 111 +++++++++++++----- .../tree_ensemble_regressor.cairo | 71 ++++++++--- 6 files changed, 210 insertions(+), 95 deletions(-) diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index fde3772d8..f7c1f82c3 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -1,5 +1,11 @@ -use orion::numbers::{FP16x16, FP32x32, FP32x32Impl, FixedTrait}; +use core::array::ArrayTrait; +use core::array::SpanTrait; +use orion::numbers::FP16x16; + +use orion::operators::tensor::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::operators::ml::POST_TRANSFORM; @@ -199,48 +205,56 @@ impl LinearClassifierImpl< let mut i = 0; match classifier.post_transform { POST_TRANSFORM::NONE => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::zero() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::zero() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::SOFTMAX => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::LOGISTIC => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::SOFTMAXZERO => { - while i != scores.data.len() { - if *scores.data.at(i) >= 
NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; @@ -257,4 +271,3 @@ fn max(a: usize, b: usize) -> usize { b } } - diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 6e240bbc6..d15f55f89 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -1,7 +1,17 @@ -use core::debug::PrintTrait; +use core::array::ArrayTrait; +use core::clone::Clone; +use core::traits::Into; +use core::array::SpanTrait; +use core::dict::Felt252DictTrait; +use core::dict::Felt252DictEntryTrait; +use orion::numbers::FP16x16; -use orion::numbers::{FP16x16, FP32x32, FP32x32Impl, FixedTrait}; +use orion::operators::tensor::{Tensor, TensorTrait}; use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait}; + +use core::debug::PrintTrait; use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::operators::ml::POST_TRANSFORM; diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo index 3886273a3..8dacddfc0 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -1,13 +1,16 @@ +use core::array::ArrayTrait; use orion::numbers::NumberTrait; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FP64x64, FP64x64Impl, FixedTrait}; -use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; -use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; -use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; -use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; + use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; + +use orion::numbers::{FP64x64, FP64x64Impl}; +use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; +use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; use orion::utils::get_row; use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; @@ -34,6 +37,7 @@ enum MODE { SVM_SVC, } + /// /// predict - Returns the top class for each of N inputs. 
trait SVMClassifierTrait { diff --git a/src/operators/ml/svm/svm_regressor.cairo b/src/operators/ml/svm/svm_regressor.cairo index f984c8480..d69e40d80 100644 --- a/src/operators/ml/svm/svm_regressor.cairo +++ b/src/operators/ml/svm/svm_regressor.cairo @@ -1,15 +1,19 @@ -use core::debug::PrintTrait; - +use core::traits::TryInto; +use core::array::ArrayTrait; +use core::array::SpanTrait; +use core::traits::Into; use orion::numbers::NumberTrait; -use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; -use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; -use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use core::debug::PrintTrait; +use orion::operators::nn::{NNTrait, FP16x16NN}; use orion::utils::get_row; use orion::operators::ml::POST_TRANSFORM; +use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; + #[derive(Copy, Drop, Destruct)] struct SVMRegressor { coefficients: Span, diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index 1b60e1e0b..c5031da25 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -1,14 +1,29 @@ -use core::debug::PrintTrait; +use core::array::ArrayTrait; +use core::clone::Clone; +use core::box::BoxTrait; +use core::traits::Into; +use core::option::OptionTrait; +use orion::operators::matrix::MutMatrixTrait; +use core::array::SpanTrait; +use core::nullable::NullableTrait; +use core::dict::Felt252DictTrait; +use core::dict::Felt252DictEntryTrait; use core::nullable::{match_nullable, FromNullableResult}; +use orion::operators::tensor::{Tensor, TensorTrait}; +use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait}; +use orion::numbers::NumberTrait; +use orion::utils::get_row; + use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; use alexandria_data_structures::array_ext::{SpanTraitExt}; -use orion::numbers::NumberTrait; -use orion::operators::matrix::{MutMatrix, MutMatrixTrait, MutMatrixImpl}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; use orion::operators::ml::POST_TRANSFORM; +use core::debug::PrintTrait; + #[derive(Destruct)] struct TreeEnsembleClassifier { ensemble: TreeEnsemble, @@ -254,9 +269,17 @@ impl TreeEnsembleClassifierImpl< if classifier.base_values.is_some() { let mut base_values = classifier.base_values.unwrap(); let mut row: usize = 0; - while row != res.rows { + loop { + if row == res.rows { + break; + } + let mut col: usize = 0; - while col != res.cols { + loop { + if col == res.cols { + break; + } + let value = *base_values.pop_front().unwrap(); res.set(row, col, value); @@ -267,9 +290,17 @@ impl TreeEnsembleClassifierImpl< } } else { let mut row: usize = 0; - while row != res.rows { + loop { + if row == res.rows { + break; + } + let mut col: usize = 0; - while col != res.cols { + loop { + if col == res.cols { + break; + } + res.set(row, col, NumberTrait::zero()); col += 1 @@ -281,9 +312,13 @@ impl TreeEnsembleClassifierImpl< let mut class_index: Felt252Dict>> = Default::default(); let mut i: usize = 0; - while i != self.class_treeids.len() { - let tid = *self.class_treeids[i]; - let nid = 
-            let nid = *self.class_nodeids[i];
+        loop {
+            if i == classifier.class_treeids.len() {
+                break;
+            }
+
+            let tid = *classifier.class_treeids[i];
+            let nid = *classifier.class_nodeids[i];
 
             let mut key = PedersenHasherImpl::new();
             let key: felt252 = key.hash(tid.into(), nid.into());
@@ -300,9 +335,12 @@ impl TreeEnsembleClassifierImpl<
             i += 1;
         };
-
         let mut i: usize = 0;
-        while i != res.rows {
+        loop {
+            if i == res.rows {
+                break;
+            }
+
             let mut indices = get_row(@leaves_index, i);
 
             let mut t_index: Array<Span<usize>> = ArrayTrait::new();
             loop {
@@ -353,7 +391,6 @@ impl TreeEnsembleClassifierImpl<
                     Option::None => { break; }
                 };
             };
-
             i += 1;
         };
 
@@ -367,8 +404,10 @@ impl TreeEnsembleClassifierImpl<
             Option::Some(c_id) => { class_id = *c_id; },
             Option::None => { class_id = 0; }
         };
-
-        while i != self.class_ids.len() {
+        loop {
+            if i == classifier.class_ids.len() {
+                break;
+            }
             match class_ids.pop_front() {
                 Option::Some(c_id) => {
                     if *c_id == class_id {
@@ -387,19 +426,24 @@ impl TreeEnsembleClassifierImpl<
         if binary {
             let mut new_res: MutMatrix<T> = MutMatrixImpl::new(res.rows, res.cols);
             let mut i: usize = 0;
-            while i != res.rows {
+            loop {
+                if i == res.rows {
+                    break;
+                }
                 // Exchange
                 match res.get(i, 0) {
                     Option::Some(res_0) => { new_res.set(i, 1, res_0); },
                     Option::None => { new_res.set(i, 1, NumberTrait::zero()); },
                 };
-
                 i += 1;
             };
             match classifier.post_transform {
                 POST_TRANSFORM::NONE => {
                     let mut i: usize = 0;
-                    while i != res.rows {
+                    loop {
+                        if i == res.rows {
+                            break;
+                        }
                         // Exchange
                         match new_res.get(i, 1) {
                             Option::Some(res_1) => {
@@ -408,49 +452,57 @@ impl TreeEnsembleClassifierImpl<
                             },
                             Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                         };
-
                         i += 1;
                     };
                 },
                 POST_TRANSFORM::SOFTMAX => {
                     let mut i: usize = 0;
-                    while i != res.rows {
+                    loop {
+                        if i == res.rows {
+                            break;
+                        }
                         // Exchange
                         match new_res.get(i, 1) {
                             Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                             Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                         };
-
                         i += 1;
                     };
                 },
                 POST_TRANSFORM::LOGISTIC => {
                     let mut i: usize = 0;
-                    while i != res.rows {
+                    loop {
+                        if i == res.rows {
+                            break;
+                        }
                         // Exchange
                         match new_res.get(i, 1) {
                             Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                             Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                         };
-
                         i += 1;
                     };
                 },
                 POST_TRANSFORM::SOFTMAXZERO => {
                     let mut i: usize = 0;
-                    while i != res.rows {
+                    loop {
+                        if i == res.rows {
+                            break;
+                        }
                         // Exchange
                         match new_res.get(i, 1) {
                             Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                             Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                         };
-
                         i += 1;
                     };
                 },
                 POST_TRANSFORM::PROBIT => {
                     let mut i: usize = 0;
-                    while i != res.rows {
+                    loop {
+                        if i == res.rows {
+                            break;
+                        }
                         // Exchange
                         match new_res.get(i, 1) {
                             Option::Some(res_1) => {
@@ -459,12 +511,10 @@ impl TreeEnsembleClassifierImpl<
                             },
                             Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
                         };
-
                         i += 1;
                     };
                 },
             };
-
             res = new_res;
         }
 
@@ -480,7 +530,7 @@ impl TreeEnsembleClassifierImpl<
         // Labels
         let mut labels = new_scores.argmax(1);
 
-        let mut labels_list: Array<usize> = array![];
+        let mut labels_list = ArrayTrait::new();
         loop {
             match labels.pop_front() {
                 Option::Some(i) => { labels_list.append(*classifier.classlabels[*i]); },
@@ -488,7 +538,6 @@ impl TreeEnsembleClassifierImpl<
             };
         };
 
-        (labels_list.span(), new_scores)
+        return (labels_list.span(), new_scores);
     }
 }
-
diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo
index 1d57423f2..bef507d73 100644
--- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo
+++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo
@@ -1,17 +1,30 @@
-use core::debug::PrintTrait;
+use core::array::ArrayTrait;
+use core::clone::Clone;
+use core::box::BoxTrait;
+use core::traits::Into;
+use core::option::OptionTrait;
+use orion::operators::matrix::MutMatrixTrait;
+use core::array::SpanTrait;
+use core::nullable::NullableTrait;
+use core::dict::Felt252DictTrait;
+use core::dict::Felt252DictEntryTrait;
 use core::nullable::{match_nullable, FromNullableResult};
+
+use orion::operators::tensor::{Tensor, TensorTrait};
+use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait};
+use orion::numbers::NumberTrait;
+use orion::utils::get_row;
+
 use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
 use alexandria_data_structures::array_ext::{SpanTraitExt};
-use orion::numbers::NumberTrait;
-use orion::operators::matrix::{MutMatrix, MutMatrixTrait, MutMatrixImpl};
-use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait};
-use orion::operators::tensor::{Tensor, TensorTrait};
+use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
 use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
-use orion::utils::get_row;
 use orion::operators::ml::POST_TRANSFORM;
+use core::debug::PrintTrait;
+
 #[derive(Destruct)]
 struct TreeEnsembleRegressor {
     ensemble: TreeEnsemble<T>,
@@ -237,9 +250,13 @@ impl TreeEnsembleRegressorImpl<
         let mut target_index: Felt252Dict<Nullable<Span<usize>>> = Default::default();
         let mut i: usize = 0;
-        while i != self.target_treeids.len() {
-            let tid = *self.target_treeids[i];
-            let nid = *self.target_nodeids[i];
+        loop {
+            if i == regressor.target_treeids.len() {
+                break;
+            }
+
+            let tid = *regressor.target_treeids[i];
+            let nid = *regressor.target_nodeids[i];
 
             let mut key = PedersenHasherImpl::new();
             let key: felt252 = key.hash(tid.into(), nid.into());
@@ -258,7 +275,11 @@ impl TreeEnsembleRegressorImpl<
         };
 
         let mut i: usize = 0;
-        while i != res.rows {
+        loop {
+            if i == res.rows {
+                break;
+            }
+
             let mut indices = get_row(@leaves_index, i);
 
             let mut t_index: Array<Span<usize>> = ArrayTrait::new();
             loop {
@@ -275,7 +296,6 @@ impl TreeEnsembleRegressorImpl<
                    Option::None => { break; }
                 };
             };
-
             let mut t_index = t_index.span();
 
             match regressor.aggregate_function {
@@ -286,7 +306,6 @@ impl TreeEnsembleRegressorImpl<
                 AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref regressor, ref res, ref t_index, i); },
                 AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref regressor, ref res, ref t_index, i); },
             };
-
             i += 1;
         };
 
@@ -294,9 +313,17 @@ impl TreeEnsembleRegressorImpl<
         if regressor.base_values.is_some() {
             let mut base_values = regressor.base_values.unwrap();
             let mut row: usize = 0;
-            while row != res.rows {
+            loop {
+                if row == res.rows {
+                    break;
+                }
+
                 let mut col: usize = 0;
-                while col != res.cols {
+                loop {
+                    if col == res.cols {
+                        break;
+                    }
+
                     let value = *base_values.pop_front().unwrap();
                     match res.get(row, col) {
                         Option::Some(val) => { res.set(row, col, val + value); },
@@ -319,10 +346,11 @@ impl TreeEnsembleRegressorImpl<
             POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
         };
 
-        new_scores
+        return new_scores;
     }
 }
 
+
 fn compute_res_SUM<
     T,
     MAG,
@@ -455,7 +483,10 @@ fn compute_res_MIN<
     i: usize
 ) {
     let mut j = 0;
-    while j != res.cols {
+    loop {
+        if j == res.cols {
+            break;
+        }
         res.set(i, j, NumberTrait::max_value());
         j += 1;
     };
@@ -489,6 +520,7 @@ fn compute_res_MIN<
     };
 }
 
+
 fn compute_res_MAX<
     T,
     MAG,
@@ -511,7 +543,10 @@ fn compute_res_MAX<
     i: usize
 ) {
     let mut j = 0;
-    while j != res.cols {
+    loop {
+        if j == res.cols {
+            break;
+        }
         res.set(i, j, NumberTrait::min_value());
         j += 1;
     };
@@ -543,4 +578,4 @@ fn compute_res_MAX<
             Option::None => { break; }
         };
     };
-}
+}
\ No newline at end of file

From 3b733036d3b0796b05f1a0628e740eb0cd630e52 Mon Sep 17 00:00:00 2001
From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 09:12:10 +0000
Subject: [PATCH 39/40] docs: update README.md [skip ci]

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cc2cf1ef2..366920c1e 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
 # Orion: An Open-source Framework for Validity and ZK ML ✨
 
-[![All Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/all_contributors-32-orange.svg?style=flat-square)](#contributors-)
 
 Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/).
@@ -108,6 +108,7 @@
 Thanks goes to these wonderful people:
[contributors table — HTML markup lost in extraction; surviving context rows: Vid Kersic (💻), Trunks @ Carbonable (📖), canacechan (💻); this patch adds (+) a row for Tristan (💻)]

From f01e841cf180415db5b036429a75d60c0756528b Mon Sep 17 00:00:00 2001
From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 09:12:11 +0000
Subject: [PATCH 40/40] docs: update .all-contributorsrc [skip ci]

---
 .all-contributorsrc | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/.all-contributorsrc b/.all-contributorsrc
index 8d4b73289..2461b613e 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -287,6 +287,15 @@
       "contributions": [
         "code"
       ]
+    },
+    {
+      "login": "TAdev0",
+      "name": "Tristan",
+      "avatar_url": "https://avatars.githubusercontent.com/u/122918260?v=4",
+      "profile": "https://nodeguardians.io/character/98995858fd55 ",
+      "contributions": [
+        "code"
+      ]
     }
   ],
   "contributorsPerLine": 7,
@@ -296,4 +305,4 @@
   "projectName": "orion",
   "projectOwner": "gizatechxyz",
   "commitType": "docs"
-}
\ No newline at end of file
+}